pax_global_header00006660000000000000000000000064136317707620014526gustar00rootroot0000000000000052 comment=d3157fa09d6e1073dd9905056cf659a5dd4923d6 influxql-1.1.0/000077500000000000000000000000001363177076200133675ustar00rootroot00000000000000influxql-1.1.0/.gitignore000066400000000000000000000004221363177076200153550ustar00rootroot00000000000000# Binaries for programs and plugins *.exe *.dll *.so *.dylib # Test binary, build with `go test -c` *.test # Output of the go coverage tool, specifically when used with LiteIDE *.out # Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 .glide/influxql-1.1.0/Jenkinsfile000066400000000000000000000013571363177076200155610ustar00rootroot00000000000000pipeline { agent { docker { image 'golang:1.9.2' } } stages { stage('Test') { steps { sh """ rm -f $WORKSPACE/test-results.{log,xml} mkdir -p /go/src/github.com/influxdata cp -a $WORKSPACE /go/src/github.com/influxdata/influxql cd /go/src/github.com/influxdata/influxql go get -v -t go test -v | tee $WORKSPACE/test-results.log """ } post { always { sh """ if [ -e test-results.log ]; then go get github.com/jstemmer/go-junit-report go-junit-report < $WORKSPACE/test-results.log > test-results.xml fi """ junit "test-results.xml" } } } } } influxql-1.1.0/LICENSE000066400000000000000000000020751363177076200144000ustar00rootroot00000000000000The MIT License (MIT) Copyright (c) 2013-2016 Errplane Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. influxql-1.1.0/README.md000066400000000000000000001000451363177076200146460ustar00rootroot00000000000000# The Influx Query Language Specification ## Introduction This is a reference for the Influx Query Language ("InfluxQL"). InfluxQL is a SQL-like query language for interacting with InfluxDB. It has been lovingly crafted to feel familiar to those coming from other SQL or SQL-like environments while providing features specific to storing and analyzing time series data. ## Notation The syntax is specified using Extended Backus-Naur Form ("EBNF"). EBNF is the same notation used in the [Go](http://golang.org) programming language specification, which can be found [here](https://golang.org/ref/spec). Not so coincidentally, InfluxDB is written in Go. ``` Production = production_name "=" [ Expression ] "." . Expression = Alternative { "|" Alternative } . Alternative = Term { Term } . Term = production_name | token [ "…" token ] | Group | Option | Repetition . Group = "(" Expression ")" . Option = "[" Expression "]" . Repetition = "{" Expression "}" . ``` Notation operators in order of increasing precedence: ``` | alternation () grouping [] option (0 or 1 times) {} repetition (0 to n times) ``` ## Comments Both single and multiline comments are supported. A comment is treated the same as whitespace by the parser. ``` -- single line comment /* multiline comment */ ``` Single line comments will skip all text until the scanner hits a newline. Multiline comments will skip all text until the end comment marker is hit. 
Nested multiline comments are not supported so the following does not work: ``` /* /* this does not work */ */ ``` ## Query representation ### Characters InfluxQL is Unicode text encoded in [UTF-8](http://en.wikipedia.org/wiki/UTF-8). ``` newline = /* the Unicode code point U+000A */ . unicode_char = /* an arbitrary Unicode code point except newline */ . ``` ## Letters and digits Letters are the set of ASCII characters plus the underscore character _ (U+005F) is considered a letter. Only decimal digits are supported. ``` letter = ascii_letter | "_" . ascii_letter = "A" … "Z" | "a" … "z" . digit = "0" … "9" . ``` ## Identifiers Identifiers are tokens which refer to database names, retention policy names, user names, measurement names, tag keys, and field keys. The rules: - double quoted identifiers can contain any unicode character other than a new line - double quoted identifiers can contain escaped `"` characters (i.e., `\"`) - double quoted identifiers can contain InfluxQL keywords - unquoted identifiers must start with an upper or lowercase ASCII character or "_" - unquoted identifiers may contain only ASCII letters, decimal digits, and "_" ``` identifier = unquoted_identifier | quoted_identifier . unquoted_identifier = ( letter ) { letter | digit } . quoted_identifier = `"` unicode_char { unicode_char } `"` . 
``` #### Examples: ``` cpu _cpu_stats "1h" "anything really" "1_Crazy-1337.identifier>NAME👍" ``` ## Keywords ``` ALL ALTER ANALYZE ANY AS ASC BEGIN BY CREATE CONTINUOUS DATABASE DATABASES DEFAULT DELETE DESC DESTINATIONS DIAGNOSTICS DISTINCT DROP DURATION END EVERY EXPLAIN FIELD FOR FROM GRANT GRANTS GROUP GROUPS IN INF INSERT INTO KEY KEYS KILL LIMIT SHOW MEASUREMENT MEASUREMENTS NAME OFFSET ON ORDER PASSWORD POLICY POLICIES PRIVILEGES QUERIES QUERY READ REPLICATION RESAMPLE RETENTION REVOKE SELECT SERIES SET SHARD SHARDS SLIMIT SOFFSET STATS SUBSCRIPTION SUBSCRIPTIONS TAG TO USER USERS VALUES WHERE WITH WRITE ``` ## Literals ### Integers InfluxQL supports decimal integer literals. Hexadecimal and octal literals are not currently supported. ``` int_lit = [ "+" | "-" ] ( "1" … "9" ) { digit } . ``` ### Floats InfluxQL supports floating-point literals. Exponents are not currently supported. ``` float_lit = [ "+" | "-" ] ( "." digit { digit } | digit { digit } "." { digit } ) . ``` ### Strings String literals must be surrounded by single quotes. Strings may contain `'` characters as long as they are escaped (i.e., `\'`). ``` string_lit = `'` { unicode_char } `'` . ``` ### Durations Duration literals specify a length of time. An integer literal followed immediately (with no spaces) by a duration unit listed below is interpreted as a duration literal. ### Duration units | Units | Meaning | |--------|-----------------------------------------| | u or µ | microseconds (1 millionth of a second) | | ms | milliseconds (1 thousandth of a second) | | s | second | | m | minute | | h | hour | | d | day | | w | week | ``` duration_lit = int_lit duration_unit . duration_unit = "u" | "µ" | "ms" | "s" | "m" | "h" | "d" | "w" . ``` ### Dates & Times The date and time literal format is not specified in EBNF like the rest of this document. It is specified using Go's date / time parsing format, which is a reference date written in the format required by InfluxQL. 
The reference date time is: InfluxQL reference date time: January 2nd, 2006 at 3:04:05 PM ``` time_lit = "2006-01-02 15:04:05.999999" | "2006-01-02" . ``` ### Booleans ``` bool_lit = TRUE | FALSE . ``` ### Regular Expressions ``` regex_lit = "/" { unicode_char } "/" . ``` **Comparators:** `=~` matches against `!~` doesn't match against > **Note:** Use regular expressions to match measurements and tags. You cannot use regular expressions to match databases, retention policies, or fields. ## Queries A query is composed of one or more statements separated by a semicolon. ``` query = statement { ";" statement } . statement = alter_retention_policy_stmt | create_continuous_query_stmt | create_database_stmt | create_retention_policy_stmt | create_subscription_stmt | create_user_stmt | delete_stmt | drop_continuous_query_stmt | drop_database_stmt | drop_measurement_stmt | drop_retention_policy_stmt | drop_series_stmt | drop_shard_stmt | drop_subscription_stmt | drop_user_stmt | explain_stmt | grant_stmt | kill_query_statement | show_continuous_queries_stmt | show_databases_stmt | show_field_keys_stmt | show_grants_stmt | show_measurements_stmt | show_queries_stmt | show_retention_policies | show_series_stmt | show_shard_groups_stmt | show_shards_stmt | show_subscriptions_stmt| show_tag_keys_stmt | show_tag_values_stmt | show_users_stmt | revoke_stmt | select_stmt . ``` ## Statements ### ALTER RETENTION POLICY ``` alter_retention_policy_stmt = "ALTER RETENTION POLICY" policy_name on_clause retention_policy_option [ retention_policy_option ] [ retention_policy_option ] [ retention_policy_option ] . ``` > Replication factors do not serve a purpose with single node instances. #### Examples: ```sql -- Set default retention policy for mydb to 1h.cpu. ALTER RETENTION POLICY "1h.cpu" ON "mydb" DEFAULT -- Change duration and replication factor. 
ALTER RETENTION POLICY "policy1" ON "somedb" DURATION 1h REPLICATION 4 ``` ### CREATE CONTINUOUS QUERY ``` create_continuous_query_stmt = "CREATE CONTINUOUS QUERY" query_name on_clause [ "RESAMPLE" resample_opts ] "BEGIN" select_stmt "END" . query_name = identifier . resample_opts = (every_stmt for_stmt | every_stmt | for_stmt) . every_stmt = "EVERY" duration_lit for_stmt = "FOR" duration_lit ``` #### Examples: ```sql -- selects from DEFAULT retention policy and writes into 6_months retention policy CREATE CONTINUOUS QUERY "10m_event_count" ON "db_name" BEGIN SELECT count("value") INTO "6_months"."events" FROM "events" GROUP BY time(10m) END; -- this selects from the output of one continuous query in one retention policy and outputs to another series in another retention policy CREATE CONTINUOUS QUERY "1h_event_count" ON "db_name" BEGIN SELECT sum("count") as "count" INTO "2_years"."events" FROM "6_months"."events" GROUP BY time(1h) END; -- this customizes the resample interval so the interval is queried every 10s and intervals are resampled until 2m after their start time -- when resample is used, at least one of "EVERY" or "FOR" must be used CREATE CONTINUOUS QUERY "cpu_mean" ON "db_name" RESAMPLE EVERY 10s FOR 2m BEGIN SELECT mean("value") INTO "cpu_mean" FROM "cpu" GROUP BY time(1m) END; ``` ### CREATE DATABASE ``` create_database_stmt = "CREATE DATABASE" db_name [ WITH [ retention_policy_duration ] [ retention_policy_replication ] [ retention_policy_shard_group_duration ] [ retention_policy_name ] ] . ``` > Replication factors do not serve a purpose with single node instances. 
#### Examples: ```sql -- Create a database called foo CREATE DATABASE "foo" -- Create a database called bar with a new DEFAULT retention policy and specify the duration, replication, shard group duration, and name of that retention policy CREATE DATABASE "bar" WITH DURATION 1d REPLICATION 1 SHARD DURATION 30m NAME "myrp" -- Create a database called mydb with a new DEFAULT retention policy and specify the name of that retention policy CREATE DATABASE "mydb" WITH NAME "myrp" ``` ### CREATE RETENTION POLICY ``` create_retention_policy_stmt = "CREATE RETENTION POLICY" policy_name on_clause retention_policy_duration retention_policy_replication [ retention_policy_shard_group_duration ] [ "DEFAULT" ] . ``` > Replication factors do not serve a purpose with single node instances. #### Examples ```sql -- Create a retention policy. CREATE RETENTION POLICY "10m.events" ON "somedb" DURATION 60m REPLICATION 2 -- Create a retention policy and set it as the DEFAULT. CREATE RETENTION POLICY "10m.events" ON "somedb" DURATION 60m REPLICATION 2 DEFAULT -- Create a retention policy and specify the shard group duration. CREATE RETENTION POLICY "10m.events" ON "somedb" DURATION 60m REPLICATION 2 SHARD DURATION 30m ``` ### CREATE SUBSCRIPTION Subscriptions tell InfluxDB to send all the data it receives to Kapacitor or other third parties. ``` create_subscription_stmt = "CREATE SUBSCRIPTION" subscription_name "ON" db_name "." retention_policy "DESTINATIONS" ("ANY"|"ALL") host { "," host} . ``` #### Examples: ```sql -- Create a SUBSCRIPTION on database 'mydb' and retention policy 'autogen' that send data to 'example.com:9090' via UDP. CREATE SUBSCRIPTION "sub0" ON "mydb"."autogen" DESTINATIONS ALL 'udp://example.com:9090' -- Create a SUBSCRIPTION on database 'mydb' and retention policy 'autogen' that round robins the data to 'h1.example.com:9090' and 'h2.example.com:9090'. 
CREATE SUBSCRIPTION "sub0" ON "mydb"."autogen" DESTINATIONS ANY 'udp://h1.example.com:9090', 'udp://h2.example.com:9090' ``` ### CREATE USER ``` create_user_stmt = "CREATE USER" user_name "WITH PASSWORD" password [ "WITH ALL PRIVILEGES" ] . ``` #### Examples: ```sql -- Create a normal database user. CREATE USER "jdoe" WITH PASSWORD '1337password' -- Create an admin user. -- Note: Unlike the GRANT statement, the "PRIVILEGES" keyword is required here. CREATE USER "jdoe" WITH PASSWORD '1337password' WITH ALL PRIVILEGES ``` > **Note:** The password string must be wrapped in single quotes. ### DELETE ``` delete_stmt = "DELETE" ( from_clause | where_clause | from_clause where_clause ) . ``` #### Examples: ```sql DELETE FROM "cpu" DELETE FROM "cpu" WHERE time < '2000-01-01T00:00:00Z' DELETE WHERE time < '2000-01-01T00:00:00Z' ``` ### DROP CONTINUOUS QUERY ``` drop_continuous_query_stmt = "DROP CONTINUOUS QUERY" query_name on_clause . ``` #### Example: ```sql DROP CONTINUOUS QUERY "myquery" ON "mydb" ``` ### DROP DATABASE ``` drop_database_stmt = "DROP DATABASE" db_name . ``` #### Example: ```sql DROP DATABASE "mydb" ``` ### DROP MEASUREMENT ``` drop_measurement_stmt = "DROP MEASUREMENT" measurement . ``` #### Examples: ```sql -- drop the cpu measurement DROP MEASUREMENT "cpu" ``` ### DROP RETENTION POLICY ``` drop_retention_policy_stmt = "DROP RETENTION POLICY" policy_name on_clause . ``` #### Example: ```sql -- drop the retention policy named 1h.cpu from mydb DROP RETENTION POLICY "1h.cpu" ON "mydb" ``` ### DROP SERIES ``` drop_series_stmt = "DROP SERIES" ( from_clause | where_clause | from_clause where_clause ) . ``` #### Example: ```sql DROP SERIES FROM "telegraf"."autogen"."cpu" WHERE cpu = 'cpu8' ``` ### DROP SHARD ``` drop_shard_stmt = "DROP SHARD" ( shard_id ) . ``` #### Example: ``` DROP SHARD 1 ``` ### DROP SUBSCRIPTION ``` drop_subscription_stmt = "DROP SUBSCRIPTION" subscription_name "ON" db_name "." retention_policy . 
``` #### Example: ```sql DROP SUBSCRIPTION "sub0" ON "mydb"."autogen" ``` ### DROP USER ``` drop_user_stmt = "DROP USER" user_name . ``` #### Example: ```sql DROP USER "jdoe" ``` ### EXPLAIN > **NOTE:** This functionality is unimplemented. ``` explain_stmt = "EXPLAIN" [ "ANALYZE" ] select_stmt . ``` ### GRANT > **NOTE:** Users can be granted privileges on databases that do not exist. ``` grant_stmt = "GRANT" privilege [ on_clause ] to_clause . ``` #### Examples: ```sql -- grant admin privileges GRANT ALL TO "jdoe" -- grant read access to a database GRANT READ ON "mydb" TO "jdoe" ``` ### KILL QUERY ``` kill_query_statement = "KILL QUERY" query_id . ``` #### Examples: ``` --- kill a query with the query_id 36 KILL QUERY 36 ``` > **NOTE:** Identify the `query_id` from the `SHOW QUERIES` output. ### SHOW CONTINUOUS QUERIES ``` show_continuous_queries_stmt = "SHOW CONTINUOUS QUERIES" . ``` #### Example: ```sql -- show all continuous queries SHOW CONTINUOUS QUERIES ``` ### SHOW DATABASES ``` show_databases_stmt = "SHOW DATABASES" . ``` #### Example: ```sql -- show all databases SHOW DATABASES ``` ### SHOW FIELD KEYS ``` show_field_keys_stmt = "SHOW FIELD KEYS" [ from_clause ] . ``` #### Examples: ```sql -- show field keys and field value data types from all measurements SHOW FIELD KEYS -- show field keys and field value data types from specified measurement SHOW FIELD KEYS FROM "cpu" ``` ### SHOW GRANTS ``` show_grants_stmt = "SHOW GRANTS FOR" user_name . ``` #### Example: ```sql -- show grants for jdoe SHOW GRANTS FOR "jdoe" ``` ### SHOW MEASUREMENTS ``` show_measurements_stmt = "SHOW MEASUREMENTS" [ with_measurement_clause ] [ where_clause ] [ limit_clause ] [ offset_clause ] . 
``` #### Examples: ```sql -- show all measurements SHOW MEASUREMENTS -- show measurements where region tag = 'uswest' AND host tag = 'serverA' SHOW MEASUREMENTS WHERE "region" = 'uswest' AND "host" = 'serverA' -- show measurements that start with 'h2o' SHOW MEASUREMENTS WITH MEASUREMENT =~ /h2o.*/ ``` ### SHOW QUERIES ``` show_queries_stmt = "SHOW QUERIES" . ``` #### Example: ```sql -- show all currently-running queries SHOW QUERIES ``` ### SHOW RETENTION POLICIES ``` show_retention_policies = "SHOW RETENTION POLICIES" on_clause . ``` #### Example: ```sql -- show all retention policies on a database SHOW RETENTION POLICIES ON "mydb" ``` ### SHOW SERIES ``` show_series_stmt = "SHOW SERIES" [ from_clause ] [ where_clause ] [ limit_clause ] [ offset_clause ] . ``` #### Example: ```sql SHOW SERIES FROM "telegraf"."autogen"."cpu" WHERE cpu = 'cpu8' ``` ### SHOW SHARD GROUPS ``` show_shard_groups_stmt = "SHOW SHARD GROUPS" . ``` #### Example: ```sql SHOW SHARD GROUPS ``` ### SHOW SHARDS ``` show_shards_stmt = "SHOW SHARDS" . ``` #### Example: ```sql SHOW SHARDS ``` ### SHOW SUBSCRIPTIONS ``` show_subscriptions_stmt = "SHOW SUBSCRIPTIONS" . ``` #### Example: ```sql SHOW SUBSCRIPTIONS ``` ### SHOW TAG KEYS ``` show_tag_keys_stmt = "SHOW TAG KEYS" [ from_clause ] [ where_clause ] [ group_by_clause ] [ limit_clause ] [ offset_clause ] . ``` #### Examples: ```sql -- show all tag keys SHOW TAG KEYS -- show all tag keys from the cpu measurement SHOW TAG KEYS FROM "cpu" -- show all tag keys from the cpu measurement where the region key = 'uswest' SHOW TAG KEYS FROM "cpu" WHERE "region" = 'uswest' -- show all tag keys where the host key = 'serverA' SHOW TAG KEYS WHERE "host" = 'serverA' ``` ### SHOW TAG VALUES ``` show_tag_values_stmt = "SHOW TAG VALUES" [ from_clause ] with_tag_clause [ where_clause ] [ group_by_clause ] [ limit_clause ] [ offset_clause ] . 
``` #### Examples: ```sql -- show all tag values across all measurements for the region tag SHOW TAG VALUES WITH KEY = "region" -- show tag values from the cpu measurement for the region tag SHOW TAG VALUES FROM "cpu" WITH KEY = "region" -- show tag values across all measurements for all tag keys that do not include the letter c SHOW TAG VALUES WITH KEY !~ /.*c.*/ -- show tag values from the cpu measurement for region & host tag keys where service = 'redis' SHOW TAG VALUES FROM "cpu" WITH KEY IN ("region", "host") WHERE "service" = 'redis' ``` ### SHOW USERS ``` show_users_stmt = "SHOW USERS" . ``` #### Example: ```sql -- show all users SHOW USERS ``` ### REVOKE ``` revoke_stmt = "REVOKE" privilege [ on_clause ] "FROM" user_name . ``` #### Examples: ```sql -- revoke admin privileges from jdoe REVOKE ALL PRIVILEGES FROM "jdoe" -- revoke read privileges from jdoe on mydb REVOKE READ ON "mydb" FROM "jdoe" ``` ### SELECT ``` select_stmt = "SELECT" fields from_clause [ into_clause ] [ where_clause ] [ group_by_clause ] [ order_by_clause ] [ limit_clause ] [ offset_clause ] [ slimit_clause ] [ soffset_clause ] [ timezone_clause ] . ``` #### Examples: ```sql -- select mean value from the cpu measurement where region = 'uswest' grouped by 10 minute intervals SELECT mean("value") FROM "cpu" WHERE "region" = 'uswest' GROUP BY time(10m) fill(0) -- select from all measurements beginning with cpu into the same measurement name in the cpu_1h retention policy SELECT mean("value") INTO "cpu_1h".:MEASUREMENT FROM /cpu.*/ -- select from measurements grouped by the day with a timezone SELECT mean("value") FROM "cpu" GROUP BY region, time(1d) fill(0) tz("America/Chicago") ``` ## Clauses ``` from_clause = "FROM" measurements . group_by_clause = "GROUP BY" dimensions fill(fill_option). into_clause = "INTO" ( measurement | back_ref ). limit_clause = "LIMIT" int_lit . offset_clause = "OFFSET" int_lit . slimit_clause = "SLIMIT" int_lit . soffset_clause = "SOFFSET" int_lit . 
timezone_clause = tz(string_lit) . on_clause = "ON" db_name . order_by_clause = "ORDER BY" sort_fields . to_clause = "TO" user_name . where_clause = "WHERE" expr . with_measurement_clause = "WITH MEASUREMENT" ( "=" measurement | "=~" regex_lit ) . with_tag_clause = "WITH KEY" ( "=" tag_key | "!=" tag_key | "=~" regex_lit | "IN (" tag_keys ")" ) . ``` ## Expressions ``` binary_op = "+" | "-" | "*" | "/" | "%" | "&" | "|" | "^" | "AND" | "OR" | "=" | "!=" | "<>" | "<" | "<=" | ">" | ">=" . expr = unary_expr { binary_op unary_expr } . unary_expr = "(" expr ")" | var_ref | time_lit | string_lit | int_lit | float_lit | bool_lit | duration_lit | regex_lit . ``` ## Other ``` alias = "AS" identifier . back_ref = ( policy_name ".:MEASUREMENT" ) | ( db_name "." [ policy_name ] ".:MEASUREMENT" ) . db_name = identifier . dimension = expr . dimensions = dimension { "," dimension } . field_key = identifier . field = expr [ alias ] . fields = field { "," field } . fill_option = "null" | "none" | "previous" | "linear" | int_lit | float_lit . host = string_lit . measurement = measurement_name | ( policy_name "." measurement_name ) | ( db_name "." [ policy_name ] "." measurement_name ) . measurements = measurement { "," measurement } . measurement_name = identifier | regex_lit . password = string_lit . policy_name = identifier . privilege = "ALL" [ "PRIVILEGES" ] | "READ" | "WRITE" . query_id = int_lit . query_name = identifier . retention_policy = identifier . retention_policy_option = retention_policy_duration | retention_policy_replication | retention_policy_shard_group_duration | "DEFAULT" . retention_policy_duration = "DURATION" duration_lit . retention_policy_replication = "REPLICATION" int_lit . retention_policy_shard_group_duration = "SHARD DURATION" duration_lit . retention_policy_name = "NAME" identifier . series_id = int_lit . shard_id = int_lit . sort_field = field_key [ ASC | DESC ] . sort_fields = sort_field { "," sort_field } . subscription_name = identifier . 
tag_key = identifier . tag_keys = tag_key { "," tag_key } . user_name = identifier . var_ref = measurement . ``` ## Query Engine Internals Once you understand the language itself, it's important to know how these language constructs are implemented in the query engine. This gives you an intuitive sense for how results will be processed and how to create efficient queries. The life cycle of a query looks like this: 1. InfluxQL query string is tokenized and then parsed into an abstract syntax tree (AST). This is the code representation of the query itself. 2. The AST is passed to the `QueryExecutor` which directs queries to the appropriate handlers. For example, queries related to meta data are executed by the meta service and `SELECT` statements are executed by the shards themselves. 3. The query engine then determines the shards that match the `SELECT` statement's time range. From these shards, iterators are created for each field in the statement. 4. Iterators are passed to the emitter which drains them and joins the resulting points. The emitter's job is to convert simple time/value points into the more complex result objects that are returned to the client. ### Understanding Iterators Iterators are at the heart of the query engine. They provide a simple interface for looping over a set of points. For example, this is an iterator over Float points: ``` type FloatIterator interface { Next() (*FloatPoint, error) } ``` These iterators are created through the `IteratorCreator` interface: ``` type IteratorCreator interface { CreateIterator(m *Measurement, opt IteratorOptions) (Iterator, error) } ``` The `IteratorOptions` provide arguments about field selection, time ranges, and dimensions that the iterator creator can use when planning an iterator. The `IteratorCreator` interface is used at many levels such as the `Shards`, `Shard`, and `Engine`. This allows optimizations to be performed when applicable such as returning a precomputed `COUNT()`. 
Iterators aren't just for reading raw data from storage though. Iterators can be composed so that they provided additional functionality around an input iterator. For example, a `DistinctIterator` can compute the distinct values for each time window for an input iterator. Or a `FillIterator` can generate additional points that are missing from an input iterator. This composition also lends itself well to aggregation. For example, a statement such as this: ``` SELECT MEAN(value) FROM cpu GROUP BY time(10m) ``` In this case, `MEAN(value)` is a `MeanIterator` wrapping an iterator from the underlying shards. However, if we can add an additional iterator to determine the derivative of the mean: ``` SELECT DERIVATIVE(MEAN(value), 20m) FROM cpu GROUP BY time(10m) ``` ### Understanding Auxiliary Fields Because InfluxQL allows users to use selector functions such as `FIRST()`, `LAST()`, `MIN()`, and `MAX()`, the engine must provide a way to return related data at the same time with the selected point. For example, in this query: ``` SELECT FIRST(value), host FROM cpu GROUP BY time(1h) ``` We are selecting the first `value` that occurs every hour but we also want to retrieve the `host` associated with that point. Since the `Point` types only specify a single typed `Value` for efficiency, we push the `host` into the auxiliary fields of the point. These auxiliary fields are attached to the point until it is passed to the emitter where the fields get split off to their own iterator. ### Built-in Iterators There are many helper iterators that let us build queries: * Merge Iterator - This iterator combines one or more iterators into a single new iterator of the same type. This iterator guarantees that all points within a window will be output before starting the next window but does not provide ordering guarantees within the window. This allows for fast access for aggregate queries which do not need stronger sorting guarantees. 
* Sorted Merge Iterator - This iterator also combines one or more iterators into a new iterator of the same type. However, this iterator guarantees time ordering of every point. This makes it slower than the `MergeIterator` but this ordering guarantee is required for non-aggregate queries which return the raw data points. * Limit Iterator - This iterator limits the number of points per name/tag group. This is the implementation of the `LIMIT` & `OFFSET` syntax. * Fill Iterator - This iterator injects extra points if they are missing from the input iterator. It can provide `null` points, points with the previous value, or points with a specific value. * Buffered Iterator - This iterator provides the ability to "unread" a point back onto a buffer so it can be read again next time. This is used extensively to provide lookahead for windowing. * Reduce Iterator - This iterator calls a reduction function for each point in a window. When the window is complete then all points for that window are output. This is used for simple aggregate functions such as `COUNT()`. * Reduce Slice Iterator - This iterator collects all points for a window first and then passes them all to a reduction function at once. The results are returned from the iterator. This is used for aggregate functions such as `DERIVATIVE()`. * Transform Iterator - This iterator calls a transform function for each point from an input iterator. This is used for executing binary expressions. * Dedupe Iterator - This iterator only outputs unique points. It is resource intensive so it is only used for small queries such as meta query statements. ### Call Iterators Function calls in InfluxQL are implemented at two levels. Some calls can be wrapped at multiple layers to improve efficiency. For example, a `COUNT()` can be performed at the shard level and then multiple `CountIterator`s can be wrapped with another `CountIterator` to compute the count of all shards. 
These iterators can be created using `NewCallIterator()`. Some iterators are more complex or need to be implemented at a higher level. For example, the `DERIVATIVE()` needs to retrieve all points for a window first before performing the calculation. This iterator is created by the engine itself and is never requested to be created by the lower levels. ### Subqueries Subqueries are built on top of iterators. Most of the work involved in supporting subqueries is in organizing how data is streamed to the iterators that will process the data. The final ordering of the stream has to output all points from one series before moving to the next series and it also needs to ensure those points are printed in order. So there are two separate concepts we need to consider when creating an iterator: ordering and grouping. When an inner query has a different grouping than the outermost query, we still need to group together related points into buckets, but we do not have to ensure that all points from one buckets are output before the points in another bucket. In fact, if we do that, we will be unable to perform the grouping for the outer query correctly. Instead, we group all points by the outermost query for an interval and then, within that interval, we group the points for the inner query. For example, here are series keys and times in seconds (fields are omitted since they don't matter in this example): cpu,host=server01 0 cpu,host=server01 10 cpu,host=server01 20 cpu,host=server01 30 cpu,host=server02 0 cpu,host=server02 10 cpu,host=server02 20 cpu,host=server02 30 With the following query: SELECT mean(max) FROM (SELECT max(value) FROM cpu GROUP BY host, time(20s)) GROUP BY time(20s) The final grouping keeps all of the points together which means we need to group `server01` with `server02`. 
That means we output the points from the underlying engine like this: cpu,host=server01 0 cpu,host=server01 10 cpu,host=server02 0 cpu,host=server02 10 cpu,host=server01 20 cpu,host=server01 30 cpu,host=server02 20 cpu,host=server02 30 Within each one of those time buckets, we calculate the `max()` value for each unique host so the output stream gets transformed to look like this: cpu,host=server01 0 cpu,host=server02 0 cpu,host=server01 20 cpu,host=server02 20 Then we can process the `mean()` on this stream of data instead and it will be output in the correct order. This is true of any order of grouping since grouping can only go from more specific to less specific. When it comes to ordering, unordered data is faster to process, but we always need to produce ordered data. When processing a raw query with no aggregates, we need to ensure data coming from the engine is ordered so the output is ordered. When we have an aggregate, we know one point is being emitted for each interval and will always produce ordered output. So for aggregates, we can take unordered data as the input and get ordered output. Any ordered data as input will always result in ordered data so we just need to look at how an iterator processes unordered data. | | raw query | selector (without group by time) | selector (with group by time) | aggregator | |-----------------|------------------|----------------------------------|-------------------------------|----------------| | ordered input | ordered output | ordered output | ordered output | ordered output | | unordered input | unordered output | unordered output | ordered output | ordered output | Since we always need ordered output, we just need to work backwards and determine which pattern of input gives us ordered output. If both ordered and unordered input produce ordered output, we prefer unordered input since it is faster. There are also certain aggregates that require ordered input like `median()` and `percentile()`. 
These functions will explicitly request ordered input. It is also important to realize that selectors that are grouped by time are the equivalent of an aggregator. It is only selectors without a group by time that are different. influxql-1.1.0/ast.go000066400000000000000000004702741363177076200145230ustar00rootroot00000000000000package influxql import ( "bytes" "errors" "fmt" "math" "regexp" "regexp/syntax" "sort" "strconv" "strings" "time" "github.com/gogo/protobuf/proto" internal "github.com/influxdata/influxql/internal" ) // DataType represents the primitive data types available in InfluxQL. type DataType int const ( // Unknown primitive data type. Unknown DataType = 0 // Float means the data type is a float. Float DataType = 1 // Integer means the data type is an integer. Integer DataType = 2 // String means the data type is a string of text. String DataType = 3 // Boolean means the data type is a boolean. Boolean DataType = 4 // Time means the data type is a time. Time DataType = 5 // Duration means the data type is a duration of time. Duration DataType = 6 // Tag means the data type is a tag. Tag DataType = 7 // AnyField means the data type is any field. AnyField DataType = 8 // Unsigned means the data type is an unsigned integer. Unsigned DataType = 9 ) const ( // MinTime is the minumum time that can be represented. // // 1677-09-21 00:12:43.145224194 +0000 UTC // // The two lowest minimum integers are used as sentinel values. The // minimum value needs to be used as a value lower than any other value for // comparisons and another separate value is needed to act as a sentinel // default value that is unusable by the user, but usable internally. // Because these two values need to be used for a special purpose, we do // not allow users to write points at these two times. MinTime = int64(math.MinInt64) + 2 // MaxTime is the maximum time that can be represented. 
// // 2262-04-11 23:47:16.854775806 +0000 UTC // // The highest time represented by a nanosecond needs to be used for an // exclusive range in the shard group, so the maximum time needs to be one // less than the possible maximum number of nanoseconds representable by an // int64 so that we don't lose a point at that one time. MaxTime = int64(math.MaxInt64) - 1 ) var ( // ErrInvalidTime is returned when the timestamp string used to // compare against time field is invalid. ErrInvalidTime = errors.New("invalid timestamp string") ) // InspectDataType returns the data type of a given value. func InspectDataType(v interface{}) DataType { switch v.(type) { case float64: return Float case int64, int32, int: return Integer case string: return String case bool: return Boolean case uint64: return Unsigned case time.Time: return Time case time.Duration: return Duration default: return Unknown } } // DataTypeFromString returns a data type given the string representation of that // data type. func DataTypeFromString(s string) DataType { switch s { case "float": return Float case "integer": return Integer case "unsigned": return Unsigned case "string": return String case "boolean": return Boolean case "time": return Time case "duration": return Duration case "tag": return Tag case "field": return AnyField default: return Unknown } } // LessThan returns true if the other DataType has greater precedence than the // current data type. Unknown has the lowest precedence. // // NOTE: This is not the same as using the `<` or `>` operator because the // integers used decrease with higher precedence, but Unknown is the lowest // precedence at the zero value. 
func (d DataType) LessThan(other DataType) bool { if d == Unknown { return true } else if d == Unsigned { return other != Unknown && other <= Integer } else if other == Unsigned { return d >= String } return other != Unknown && other < d } var ( zeroFloat64 interface{} = float64(0) zeroInt64 interface{} = int64(0) zeroUint64 interface{} = uint64(0) zeroString interface{} = "" zeroBoolean interface{} = false zeroTime interface{} = time.Time{} zeroDuration interface{} = time.Duration(0) ) // Zero returns the zero value for the DataType. // The return value of this method, when sent back to InspectDataType, // may not produce the same value. func (d DataType) Zero() interface{} { switch d { case Float: return zeroFloat64 case Integer: return zeroInt64 case Unsigned: return zeroUint64 case String, Tag: return zeroString case Boolean: return zeroBoolean case Time: return zeroTime case Duration: return zeroDuration } return nil } // String returns the human-readable string representation of the DataType. func (d DataType) String() string { switch d { case Float: return "float" case Integer: return "integer" case Unsigned: return "unsigned" case String: return "string" case Boolean: return "boolean" case Time: return "time" case Duration: return "duration" case Tag: return "tag" case AnyField: return "field" } return "unknown" } // Node represents a node in the InfluxDB abstract syntax tree. type Node interface { // node is unexported to ensure implementations of Node // can only originate in this package. 
node() String() string } func (*Query) node() {} func (Statements) node() {} func (*AlterRetentionPolicyStatement) node() {} func (*CreateContinuousQueryStatement) node() {} func (*CreateDatabaseStatement) node() {} func (*CreateRetentionPolicyStatement) node() {} func (*CreateSubscriptionStatement) node() {} func (*CreateUserStatement) node() {} func (*Distinct) node() {} func (*DeleteSeriesStatement) node() {} func (*DeleteStatement) node() {} func (*DropContinuousQueryStatement) node() {} func (*DropDatabaseStatement) node() {} func (*DropMeasurementStatement) node() {} func (*DropRetentionPolicyStatement) node() {} func (*DropSeriesStatement) node() {} func (*DropShardStatement) node() {} func (*DropSubscriptionStatement) node() {} func (*DropUserStatement) node() {} func (*ExplainStatement) node() {} func (*GrantStatement) node() {} func (*GrantAdminStatement) node() {} func (*KillQueryStatement) node() {} func (*RevokeStatement) node() {} func (*RevokeAdminStatement) node() {} func (*SelectStatement) node() {} func (*SetPasswordUserStatement) node() {} func (*ShowContinuousQueriesStatement) node() {} func (*ShowGrantsForUserStatement) node() {} func (*ShowDatabasesStatement) node() {} func (*ShowFieldKeyCardinalityStatement) node() {} func (*ShowFieldKeysStatement) node() {} func (*ShowRetentionPoliciesStatement) node() {} func (*ShowMeasurementCardinalityStatement) node() {} func (*ShowMeasurementsStatement) node() {} func (*ShowQueriesStatement) node() {} func (*ShowSeriesStatement) node() {} func (*ShowSeriesCardinalityStatement) node() {} func (*ShowShardGroupsStatement) node() {} func (*ShowShardsStatement) node() {} func (*ShowStatsStatement) node() {} func (*ShowSubscriptionsStatement) node() {} func (*ShowDiagnosticsStatement) node() {} func (*ShowTagKeyCardinalityStatement) node() {} func (*ShowTagKeysStatement) node() {} func (*ShowTagValuesCardinalityStatement) node() {} func (*ShowTagValuesStatement) node() {} func (*ShowUsersStatement) node() {} 
func (*BinaryExpr) node() {} func (*BooleanLiteral) node() {} func (*BoundParameter) node() {} func (*Call) node() {} func (*Dimension) node() {} func (Dimensions) node() {} func (*DurationLiteral) node() {} func (*IntegerLiteral) node() {} func (*UnsignedLiteral) node() {} func (*Field) node() {} func (Fields) node() {} func (*Measurement) node() {} func (Measurements) node() {} func (*NilLiteral) node() {} func (*NumberLiteral) node() {} func (*ParenExpr) node() {} func (*RegexLiteral) node() {} func (*ListLiteral) node() {} func (*SortField) node() {} func (SortFields) node() {} func (Sources) node() {} func (*StringLiteral) node() {} func (*SubQuery) node() {} func (*Target) node() {} func (*TimeLiteral) node() {} func (*VarRef) node() {} func (*Wildcard) node() {} // Query represents a collection of ordered statements. type Query struct { Statements Statements } // String returns a string representation of the query. func (q *Query) String() string { return q.Statements.String() } // Statements represents a list of statements. type Statements []Statement // String returns a string representation of the statements. func (a Statements) String() string { var str []string for _, stmt := range a { str = append(str, stmt.String()) } return strings.Join(str, ";\n") } // Statement represents a single command in InfluxQL. type Statement interface { Node // stmt is unexported to ensure implementations of Statement // can only originate in this package. stmt() RequiredPrivileges() (ExecutionPrivileges, error) } // HasDefaultDatabase provides an interface to get the default database from a Statement. type HasDefaultDatabase interface { Node // stmt is unexported to ensure implementations of HasDefaultDatabase // can only originate in this package. stmt() DefaultDatabase() string } // ExecutionPrivilege is a privilege required for a user to execute // a statement on a database or resource. type ExecutionPrivilege struct { // Admin privilege required. 
Admin bool // Name of the database. Name string // Database privilege required. Privilege Privilege } // ExecutionPrivileges is a list of privileges required to execute a statement. type ExecutionPrivileges []ExecutionPrivilege func (*AlterRetentionPolicyStatement) stmt() {} func (*CreateContinuousQueryStatement) stmt() {} func (*CreateDatabaseStatement) stmt() {} func (*CreateRetentionPolicyStatement) stmt() {} func (*CreateSubscriptionStatement) stmt() {} func (*CreateUserStatement) stmt() {} func (*DeleteSeriesStatement) stmt() {} func (*DeleteStatement) stmt() {} func (*DropContinuousQueryStatement) stmt() {} func (*DropDatabaseStatement) stmt() {} func (*DropMeasurementStatement) stmt() {} func (*DropRetentionPolicyStatement) stmt() {} func (*DropSeriesStatement) stmt() {} func (*DropSubscriptionStatement) stmt() {} func (*DropUserStatement) stmt() {} func (*ExplainStatement) stmt() {} func (*GrantStatement) stmt() {} func (*GrantAdminStatement) stmt() {} func (*KillQueryStatement) stmt() {} func (*ShowContinuousQueriesStatement) stmt() {} func (*ShowGrantsForUserStatement) stmt() {} func (*ShowDatabasesStatement) stmt() {} func (*ShowFieldKeyCardinalityStatement) stmt() {} func (*ShowFieldKeysStatement) stmt() {} func (*ShowMeasurementCardinalityStatement) stmt() {} func (*ShowMeasurementsStatement) stmt() {} func (*ShowQueriesStatement) stmt() {} func (*ShowRetentionPoliciesStatement) stmt() {} func (*ShowSeriesStatement) stmt() {} func (*ShowSeriesCardinalityStatement) stmt() {} func (*ShowShardGroupsStatement) stmt() {} func (*ShowShardsStatement) stmt() {} func (*ShowStatsStatement) stmt() {} func (*DropShardStatement) stmt() {} func (*ShowSubscriptionsStatement) stmt() {} func (*ShowDiagnosticsStatement) stmt() {} func (*ShowTagKeyCardinalityStatement) stmt() {} func (*ShowTagKeysStatement) stmt() {} func (*ShowTagValuesCardinalityStatement) stmt() {} func (*ShowTagValuesStatement) stmt() {} func (*ShowUsersStatement) stmt() {} func (*RevokeStatement) 
stmt() {} func (*RevokeAdminStatement) stmt() {} func (*SelectStatement) stmt() {} func (*SetPasswordUserStatement) stmt() {} // Expr represents an expression that can be evaluated to a value. type Expr interface { Node // expr is unexported to ensure implementations of Expr // can only originate in this package. expr() } func (*BinaryExpr) expr() {} func (*BooleanLiteral) expr() {} func (*BoundParameter) expr() {} func (*Call) expr() {} func (*Distinct) expr() {} func (*DurationLiteral) expr() {} func (*IntegerLiteral) expr() {} func (*UnsignedLiteral) expr() {} func (*NilLiteral) expr() {} func (*NumberLiteral) expr() {} func (*ParenExpr) expr() {} func (*RegexLiteral) expr() {} func (*ListLiteral) expr() {} func (*StringLiteral) expr() {} func (*TimeLiteral) expr() {} func (*VarRef) expr() {} func (*Wildcard) expr() {} // Literal represents a static literal. type Literal interface { Expr // literal is unexported to ensure implementations of Literal // can only originate in this package. literal() } func (*BooleanLiteral) literal() {} func (*BoundParameter) literal() {} func (*DurationLiteral) literal() {} func (*IntegerLiteral) literal() {} func (*UnsignedLiteral) literal() {} func (*NilLiteral) literal() {} func (*NumberLiteral) literal() {} func (*RegexLiteral) literal() {} func (*ListLiteral) literal() {} func (*StringLiteral) literal() {} func (*TimeLiteral) literal() {} // Source represents a source of data for a statement. type Source interface { Node // source is unexported to ensure implementations of Source // can only originate in this package. source() } func (*Measurement) source() {} func (*SubQuery) source() {} // Sources represents a list of sources. type Sources []Source // String returns a string representation of a Sources array. 
func (a Sources) String() string { var buf bytes.Buffer ubound := len(a) - 1 for i, src := range a { _, _ = buf.WriteString(src.String()) if i < ubound { _, _ = buf.WriteString(", ") } } return buf.String() } // Measurements returns all measurements including ones embedded in subqueries. func (a Sources) Measurements() []*Measurement { mms := make([]*Measurement, 0, len(a)) for _, src := range a { switch src := src.(type) { case *Measurement: mms = append(mms, src) case *SubQuery: mms = append(mms, src.Statement.Sources.Measurements()...) } } return mms } // MarshalBinary encodes a list of sources to a binary format. func (a Sources) MarshalBinary() ([]byte, error) { var pb internal.Measurements pb.Items = make([]*internal.Measurement, len(a)) for i, source := range a { pb.Items[i] = encodeMeasurement(source.(*Measurement)) } return proto.Marshal(&pb) } // UnmarshalBinary decodes binary data into a list of sources. func (a *Sources) UnmarshalBinary(buf []byte) error { var pb internal.Measurements if err := proto.Unmarshal(buf, &pb); err != nil { return err } *a = make(Sources, len(pb.GetItems())) for i := range pb.GetItems() { mm, err := decodeMeasurement(pb.GetItems()[i]) if err != nil { return err } (*a)[i] = mm } return nil } // RequiredPrivileges recursively returns a list of execution privileges required. func (a Sources) RequiredPrivileges() (ExecutionPrivileges, error) { var ep ExecutionPrivileges for _, source := range a { switch source := source.(type) { case *Measurement: ep = append(ep, ExecutionPrivilege{ Name: source.Database, Privilege: ReadPrivilege, }) case *SubQuery: privs, err := source.Statement.RequiredPrivileges() if err != nil { return nil, err } ep = append(ep, privs...) default: return nil, fmt.Errorf("invalid source: %s", source) } } return ep, nil } // IsSystemName returns true if name is an internal system name. 
func IsSystemName(name string) bool {
	switch name {
	case "_fieldKeys", "_measurements", "_name", "_series", "_tagKey", "_tagKeys", "_tags":
		return true
	}
	return false
}

// SortField represents a field to sort results by.
type SortField struct {
	// Name of the field.
	Name string

	// Sort order.
	Ascending bool
}

// String returns a string representation of a sort field, e.g. "host ASC",
// or just the direction ("ASC"/"DESC") when the field name is empty.
func (field *SortField) String() string {
	var buf bytes.Buffer
	if field.Name != "" {
		buf.WriteString(field.Name)
		buf.WriteByte(' ')
	}
	if !field.Ascending {
		buf.WriteString("DESC")
	} else {
		buf.WriteString("ASC")
	}
	return buf.String()
}

// SortFields represents an ordered list of ORDER BY fields.
type SortFields []*SortField

// String returns a string representation of sort fields.
func (a SortFields) String() string {
	parts := make([]string, len(a))
	for i, field := range a {
		parts[i] = field.String()
	}
	return strings.Join(parts, ", ")
}

// CreateDatabaseStatement represents a command for creating a new database.
type CreateDatabaseStatement struct {
	// Name of the database to be created.
	Name string

	// RetentionPolicyCreate indicates whether the user explicitly wants to create a retention policy.
	RetentionPolicyCreate bool

	// RetentionPolicyDuration indicates retention duration for the new database.
	RetentionPolicyDuration *time.Duration

	// RetentionPolicyReplication indicates retention replication for the new database.
	RetentionPolicyReplication *int

	// RetentionPolicyName indicates retention name for the new database.
	RetentionPolicyName string

	// RetentionPolicyShardGroupDuration indicates shard group duration for the new database.
	RetentionPolicyShardGroupDuration time.Duration
}

// String returns a string representation of the create database statement.
func (s *CreateDatabaseStatement) String() string { var buf bytes.Buffer _, _ = buf.WriteString("CREATE DATABASE ") _, _ = buf.WriteString(QuoteIdent(s.Name)) if s.RetentionPolicyCreate { _, _ = buf.WriteString(" WITH") if s.RetentionPolicyDuration != nil { _, _ = buf.WriteString(" DURATION ") _, _ = buf.WriteString(s.RetentionPolicyDuration.String()) } if s.RetentionPolicyReplication != nil { _, _ = buf.WriteString(" REPLICATION ") _, _ = buf.WriteString(strconv.Itoa(*s.RetentionPolicyReplication)) } if s.RetentionPolicyShardGroupDuration > 0 { _, _ = buf.WriteString(" SHARD DURATION ") _, _ = buf.WriteString(s.RetentionPolicyShardGroupDuration.String()) } if s.RetentionPolicyName != "" { _, _ = buf.WriteString(" NAME ") _, _ = buf.WriteString(QuoteIdent(s.RetentionPolicyName)) } } return buf.String() } // RequiredPrivileges returns the privilege required to execute a CreateDatabaseStatement. func (s *CreateDatabaseStatement) RequiredPrivileges() (ExecutionPrivileges, error) { return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil } // DropDatabaseStatement represents a command to drop a database. type DropDatabaseStatement struct { // Name of the database to be dropped. Name string } // String returns a string representation of the drop database statement. func (s *DropDatabaseStatement) String() string { var buf bytes.Buffer _, _ = buf.WriteString("DROP DATABASE ") _, _ = buf.WriteString(QuoteIdent(s.Name)) return buf.String() } // RequiredPrivileges returns the privilege required to execute a DropDatabaseStatement. func (s *DropDatabaseStatement) RequiredPrivileges() (ExecutionPrivileges, error) { return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil } // DropRetentionPolicyStatement represents a command to drop a retention policy from a database. type DropRetentionPolicyStatement struct { // Name of the policy to drop. Name string // Name of the database to drop the policy from. 
Database string } // String returns a string representation of the drop retention policy statement. func (s *DropRetentionPolicyStatement) String() string { var buf bytes.Buffer _, _ = buf.WriteString("DROP RETENTION POLICY ") _, _ = buf.WriteString(QuoteIdent(s.Name)) _, _ = buf.WriteString(" ON ") _, _ = buf.WriteString(QuoteIdent(s.Database)) return buf.String() } // RequiredPrivileges returns the privilege required to execute a DropRetentionPolicyStatement. func (s *DropRetentionPolicyStatement) RequiredPrivileges() (ExecutionPrivileges, error) { return ExecutionPrivileges{{Admin: false, Name: s.Database, Privilege: WritePrivilege}}, nil } // DefaultDatabase returns the default database from the statement. func (s *DropRetentionPolicyStatement) DefaultDatabase() string { return s.Database } // CreateUserStatement represents a command for creating a new user. type CreateUserStatement struct { // Name of the user to be created. Name string // User's password. Password string // User's admin privilege. Admin bool } // String returns a string representation of the create user statement. func (s *CreateUserStatement) String() string { var buf bytes.Buffer _, _ = buf.WriteString("CREATE USER ") _, _ = buf.WriteString(QuoteIdent(s.Name)) _, _ = buf.WriteString(" WITH PASSWORD ") _, _ = buf.WriteString("[REDACTED]") if s.Admin { _, _ = buf.WriteString(" WITH ALL PRIVILEGES") } return buf.String() } // RequiredPrivileges returns the privilege(s) required to execute a CreateUserStatement. func (s *CreateUserStatement) RequiredPrivileges() (ExecutionPrivileges, error) { return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil } // DropUserStatement represents a command for dropping a user. type DropUserStatement struct { // Name of the user to drop. Name string } // String returns a string representation of the drop user statement. 
func (s *DropUserStatement) String() string { var buf bytes.Buffer _, _ = buf.WriteString("DROP USER ") _, _ = buf.WriteString(QuoteIdent(s.Name)) return buf.String() } // RequiredPrivileges returns the privilege(s) required to execute a DropUserStatement. func (s *DropUserStatement) RequiredPrivileges() (ExecutionPrivileges, error) { return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil } // Privilege is a type of action a user can be granted the right to use. type Privilege int const ( // NoPrivileges means no privileges required / granted / revoked. NoPrivileges Privilege = iota // ReadPrivilege means read privilege required / granted / revoked. ReadPrivilege // WritePrivilege means write privilege required / granted / revoked. WritePrivilege // AllPrivileges means all privileges required / granted / revoked. AllPrivileges ) // NewPrivilege returns an initialized *Privilege. func NewPrivilege(p Privilege) *Privilege { return &p } // String returns a string representation of a Privilege. func (p Privilege) String() string { switch p { case NoPrivileges: return "NO PRIVILEGES" case ReadPrivilege: return "READ" case WritePrivilege: return "WRITE" case AllPrivileges: return "ALL PRIVILEGES" } return "" } // GrantStatement represents a command for granting a privilege. type GrantStatement struct { // The privilege to be granted. Privilege Privilege // Database to grant the privilege to. On string // Who to grant the privilege to. User string } // String returns a string representation of the grant statement. func (s *GrantStatement) String() string { var buf bytes.Buffer _, _ = buf.WriteString("GRANT ") _, _ = buf.WriteString(s.Privilege.String()) _, _ = buf.WriteString(" ON ") _, _ = buf.WriteString(QuoteIdent(s.On)) _, _ = buf.WriteString(" TO ") _, _ = buf.WriteString(QuoteIdent(s.User)) return buf.String() } // RequiredPrivileges returns the privilege required to execute a GrantStatement. 
func (s *GrantStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	// Granting privileges is an administrative action: it requires admin access.
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil
}

// DefaultDatabase returns the default database from the statement.
func (s *GrantStatement) DefaultDatabase() string {
	// The database named in the ON clause acts as the statement's default database.
	return s.On
}

// GrantAdminStatement represents a command for granting admin privilege.
type GrantAdminStatement struct {
	// Name of the user the admin privilege is granted to.
	User string
}

// String returns a string representation of the grant admin statement.
func (s *GrantAdminStatement) String() string {
	var buf bytes.Buffer
	_, _ = buf.WriteString("GRANT ALL PRIVILEGES TO ")
	_, _ = buf.WriteString(QuoteIdent(s.User))
	return buf.String()
}

// RequiredPrivileges returns the privilege required to execute a GrantAdminStatement.
func (s *GrantAdminStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil
}

// KillQueryStatement represents a command for killing a query.
type KillQueryStatement struct {
	// ID of the query to kill.
	QueryID uint64

	// The host to delegate the kill to. When empty, the ON clause is
	// omitted from the statement text.
	Host string
}

// String returns a string representation of the kill query statement.
func (s *KillQueryStatement) String() string {
	var buf bytes.Buffer
	_, _ = buf.WriteString("KILL QUERY ")
	_, _ = buf.WriteString(strconv.FormatUint(s.QueryID, 10))
	if s.Host != "" {
		_, _ = buf.WriteString(" ON ")
		_, _ = buf.WriteString(QuoteIdent(s.Host))
	}
	return buf.String()
}

// RequiredPrivileges returns the privilege required to execute a KillQueryStatement.
func (s *KillQueryStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil
}

// SetPasswordUserStatement represents a command for changing user password.
type SetPasswordUserStatement struct {
	// Plain-text password.
	Password string

	// Name of the user whose password is being set.
Name string } // String returns a string representation of the set password statement. func (s *SetPasswordUserStatement) String() string { var buf bytes.Buffer _, _ = buf.WriteString("SET PASSWORD FOR ") _, _ = buf.WriteString(QuoteIdent(s.Name)) _, _ = buf.WriteString(" = ") _, _ = buf.WriteString("[REDACTED]") return buf.String() } // RequiredPrivileges returns the privilege required to execute a SetPasswordUserStatement. func (s *SetPasswordUserStatement) RequiredPrivileges() (ExecutionPrivileges, error) { return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil } // RevokeStatement represents a command to revoke a privilege from a user. type RevokeStatement struct { // The privilege to be revoked. Privilege Privilege // Database to revoke the privilege from. On string // Who to revoke privilege from. User string } // String returns a string representation of the revoke statement. func (s *RevokeStatement) String() string { var buf bytes.Buffer _, _ = buf.WriteString("REVOKE ") _, _ = buf.WriteString(s.Privilege.String()) _, _ = buf.WriteString(" ON ") _, _ = buf.WriteString(QuoteIdent(s.On)) _, _ = buf.WriteString(" FROM ") _, _ = buf.WriteString(QuoteIdent(s.User)) return buf.String() } // RequiredPrivileges returns the privilege required to execute a RevokeStatement. func (s *RevokeStatement) RequiredPrivileges() (ExecutionPrivileges, error) { return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil } // DefaultDatabase returns the default database from the statement. func (s *RevokeStatement) DefaultDatabase() string { return s.On } // RevokeAdminStatement represents a command to revoke admin privilege from a user. type RevokeAdminStatement struct { // Who to revoke admin privilege from. User string } // String returns a string representation of the revoke admin statement. 
func (s *RevokeAdminStatement) String() string { var buf bytes.Buffer _, _ = buf.WriteString("REVOKE ALL PRIVILEGES FROM ") _, _ = buf.WriteString(QuoteIdent(s.User)) return buf.String() } // RequiredPrivileges returns the privilege required to execute a RevokeAdminStatement. func (s *RevokeAdminStatement) RequiredPrivileges() (ExecutionPrivileges, error) { return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil } // CreateRetentionPolicyStatement represents a command to create a retention policy. type CreateRetentionPolicyStatement struct { // Name of policy to create. Name string // Name of database this policy belongs to. Database string // Duration data written to this policy will be retained. Duration time.Duration // Replication factor for data written to this policy. Replication int // Should this policy be set as default for the database? Default bool // Shard Duration. ShardGroupDuration time.Duration } // String returns a string representation of the create retention policy. func (s *CreateRetentionPolicyStatement) String() string { var buf bytes.Buffer _, _ = buf.WriteString("CREATE RETENTION POLICY ") _, _ = buf.WriteString(QuoteIdent(s.Name)) _, _ = buf.WriteString(" ON ") _, _ = buf.WriteString(QuoteIdent(s.Database)) _, _ = buf.WriteString(" DURATION ") _, _ = buf.WriteString(FormatDuration(s.Duration)) _, _ = buf.WriteString(" REPLICATION ") _, _ = buf.WriteString(strconv.Itoa(s.Replication)) if s.ShardGroupDuration > 0 { _, _ = buf.WriteString(" SHARD DURATION ") _, _ = buf.WriteString(FormatDuration(s.ShardGroupDuration)) } if s.Default { _, _ = buf.WriteString(" DEFAULT") } return buf.String() } // RequiredPrivileges returns the privilege required to execute a CreateRetentionPolicyStatement. 
func (s *CreateRetentionPolicyStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil
}

// DefaultDatabase returns the default database from the statement.
func (s *CreateRetentionPolicyStatement) DefaultDatabase() string {
	return s.Database
}

// AlterRetentionPolicyStatement represents a command to alter an existing retention policy.
// Pointer fields are nil when the corresponding clause was not specified.
type AlterRetentionPolicyStatement struct {
	// Name of policy to alter.
	Name string

	// Name of the database this policy belongs to.
	Database string

	// Duration data written to this policy will be retained.
	// Nil when the DURATION clause was not specified.
	Duration *time.Duration

	// Replication factor for data written to this policy.
	// Nil when the REPLICATION clause was not specified.
	Replication *int

	// Should this policy be set as default for the database?
	Default bool

	// Shard group duration for the policy.
	// Nil when the SHARD DURATION clause was not specified.
	ShardGroupDuration *time.Duration
}

// String returns a string representation of the alter retention policy statement.
func (s *AlterRetentionPolicyStatement) String() string {
	var buf bytes.Buffer
	_, _ = buf.WriteString("ALTER RETENTION POLICY ")
	_, _ = buf.WriteString(QuoteIdent(s.Name))
	_, _ = buf.WriteString(" ON ")
	_, _ = buf.WriteString(QuoteIdent(s.Database))
	// Optional clauses are emitted only when they were provided (non-nil).
	if s.Duration != nil {
		_, _ = buf.WriteString(" DURATION ")
		_, _ = buf.WriteString(FormatDuration(*s.Duration))
	}
	if s.Replication != nil {
		_, _ = buf.WriteString(" REPLICATION ")
		_, _ = buf.WriteString(strconv.Itoa(*s.Replication))
	}
	if s.ShardGroupDuration != nil {
		_, _ = buf.WriteString(" SHARD DURATION ")
		_, _ = buf.WriteString(FormatDuration(*s.ShardGroupDuration))
	}
	if s.Default {
		_, _ = buf.WriteString(" DEFAULT")
	}
	return buf.String()
}

// RequiredPrivileges returns the privilege required to execute an AlterRetentionPolicyStatement.
func (s *AlterRetentionPolicyStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil
}

// DefaultDatabase returns the default database from the statement.
func (s *AlterRetentionPolicyStatement) DefaultDatabase() string {
	return s.Database
}

// FillOption represents different options for filling aggregate windows.
type FillOption int

const (
	// NullFill means that empty aggregate windows will just have null values.
	NullFill FillOption = iota

	// NoFill means that empty aggregate windows will be purged from the result.
	NoFill

	// NumberFill means that empty aggregate windows will be filled with a provided number.
	NumberFill

	// PreviousFill means that empty aggregate windows will be filled with whatever the previous aggregate window had.
	PreviousFill

	// LinearFill means that empty aggregate windows will be filled with a value
	// linearly interpolated between the surrounding non-null windows.
	LinearFill
)

// SelectStatement represents a command for extracting data from the database.
type SelectStatement struct {
	// Expressions returned from the selection.
	Fields Fields

	// Target (destination) for the result of a SELECT INTO query.
	Target *Target

	// Expressions used for grouping the selection.
	Dimensions Dimensions

	// Data sources (measurements) that fields are extracted from.
	Sources Sources

	// An expression evaluated on each data point.
	Condition Expr

	// Fields to sort results by.
	SortFields SortFields

	// Maximum number of rows to be returned. Unlimited if zero.
	Limit int

	// Returns rows starting at an offset from the first row.
	Offset int

	// Maximum number of series to be returned. Unlimited if zero.
	SLimit int

	// Returns series starting at an offset from the first one.
	SOffset int

	// Memoized group by interval from GroupBy().
	groupByInterval time.Duration

	// Whether it's a query for raw data values (i.e. not an aggregate).
	IsRawQuery bool

	// What fill option the select statement uses, if any.
	Fill FillOption

	// The value to fill empty aggregate buckets with, if any.
	FillValue interface{}

	// The timezone for the query, if any.
	Location *time.Location

	// Renames the implicit time field name.
TimeAlias string // Removes the "time" column from the output. OmitTime bool // Removes measurement name from resulting query. Useful for meta queries. StripName bool // Overrides the output measurement name. EmitName string // Removes duplicate rows from raw queries. Dedupe bool } // TimeAscending returns true if the time field is sorted in chronological order. func (s *SelectStatement) TimeAscending() bool { return len(s.SortFields) == 0 || s.SortFields[0].Ascending } // TimeFieldName returns the name of the time field. func (s *SelectStatement) TimeFieldName() string { if s.TimeAlias != "" { return s.TimeAlias } return "time" } // Clone returns a deep copy of the statement. func (s *SelectStatement) Clone() *SelectStatement { clone := *s clone.Fields = make(Fields, 0, len(s.Fields)) clone.Dimensions = make(Dimensions, 0, len(s.Dimensions)) clone.Sources = cloneSources(s.Sources) clone.SortFields = make(SortFields, 0, len(s.SortFields)) clone.Condition = CloneExpr(s.Condition) if s.Target != nil { clone.Target = &Target{ Measurement: &Measurement{ Database: s.Target.Measurement.Database, RetentionPolicy: s.Target.Measurement.RetentionPolicy, Name: s.Target.Measurement.Name, Regex: CloneRegexLiteral(s.Target.Measurement.Regex), }, } } for _, f := range s.Fields { clone.Fields = append(clone.Fields, &Field{Expr: CloneExpr(f.Expr), Alias: f.Alias}) } for _, d := range s.Dimensions { clone.Dimensions = append(clone.Dimensions, &Dimension{Expr: CloneExpr(d.Expr)}) } for _, f := range s.SortFields { clone.SortFields = append(clone.SortFields, &SortField{Name: f.Name, Ascending: f.Ascending}) } return &clone } func cloneSources(sources Sources) Sources { clone := make(Sources, 0, len(sources)) for _, s := range sources { clone = append(clone, cloneSource(s)) } return clone } func cloneSource(s Source) Source { if s == nil { return nil } switch s := s.(type) { case *Measurement: return s.Clone() case *SubQuery: return &SubQuery{Statement: s.Statement.Clone()} default: 
		panic("unreachable")
	}
}

// FieldMapper returns the data type for the field inside of the measurement.
type FieldMapper interface {
	FieldDimensions(m *Measurement) (fields map[string]DataType, dimensions map[string]struct{}, err error)

	TypeMapper
}

// RewriteFields returns the re-written form of the select statement. Any wildcard query
// fields are replaced with the supplied fields, and any wildcard GROUP BY fields are replaced
// with the supplied dimensions. Any fields with no type specifier are rewritten with the
// appropriate type.
func (s *SelectStatement) RewriteFields(m FieldMapper) (*SelectStatement, error) {
	// Clone the statement so we aren't rewriting the original.
	other := s.Clone()

	// Iterate through the sources and rewrite any subqueries first.
	for _, src := range other.Sources {
		switch src := src.(type) {
		case *SubQuery:
			stmt, err := src.Statement.RewriteFields(m)
			if err != nil {
				return nil, err
			}
			src.Statement = stmt
		}
	}

	// Rewrite all variable references in the fields with their types if one
	// hasn't been specified.
	rewrite := func(n Node) {
		ref, ok := n.(*VarRef)
		if !ok || (ref.Type != Unknown && ref.Type != AnyField) {
			// Not a variable reference, or the type is already resolved.
			return
		}

		// Resolve the type from the sources. An AnyField reference that
		// resolves to a tag is left untouched so it stays a field reference.
		typ := EvalType(ref, other.Sources, m)
		if typ == Tag && ref.Type == AnyField {
			return
		}
		ref.Type = typ
	}
	WalkFunc(other.Fields, rewrite)
	WalkFunc(other.Condition, rewrite)

	// Ignore if there are no wildcards.
	hasFieldWildcard := other.HasFieldWildcard()
	hasDimensionWildcard := other.HasDimensionWildcard()
	if !hasFieldWildcard && !hasDimensionWildcard {
		return other, nil
	}

	// Look up the full set of fields and dimensions to expand wildcards into.
	fieldSet, dimensionSet, err := FieldDimensions(other.Sources, m)
	if err != nil {
		return nil, err
	}

	// If there are no dimension wildcards then merge dimensions to fields.
	if !hasDimensionWildcard {
		// Remove the dimensions present in the group by so they don't get added as fields.
		for _, d := range other.Dimensions {
			switch expr := d.Expr.(type) {
			case *VarRef:
				delete(dimensionSet, expr.Val)
			}
		}
	}

	// Sort the field and dimension names for wildcard expansion.
	var fields []VarRef
	if len(fieldSet) > 0 {
		fields = make([]VarRef, 0, len(fieldSet))
		for name, typ := range fieldSet {
			fields = append(fields, VarRef{Val: name, Type: typ})
		}
		if !hasDimensionWildcard {
			for name := range dimensionSet {
				fields = append(fields, VarRef{Val: name, Type: Tag})
			}
			dimensionSet = nil
		}
		// Sorting makes the wildcard expansion deterministic.
		sort.Sort(VarRefs(fields))
	}
	dimensions := stringSetSlice(dimensionSet)

	// Rewrite all wildcard query fields
	if hasFieldWildcard {
		// Allocate a slice assuming there is exactly one wildcard for efficiency.
		rwFields := make(Fields, 0, len(other.Fields)+len(fields)-1)
		for _, f := range other.Fields {
			switch expr := f.Expr.(type) {
			case *Wildcard:
				// Expand * (or *::field / *::tag) into the matching refs.
				for _, ref := range fields {
					if expr.Type == FIELD && ref.Type == Tag {
						continue
					} else if expr.Type == TAG && ref.Type != Tag {
						continue
					}
					rwFields = append(rwFields, &Field{Expr: &VarRef{Val: ref.Val, Type: ref.Type}})
				}
			case *RegexLiteral:
				// Expand a bare regex field into every ref whose name matches.
				for _, ref := range fields {
					if expr.Val.MatchString(ref.Val) {
						rwFields = append(rwFields, &Field{Expr: &VarRef{Val: ref.Val, Type: ref.Type}})
					}
				}
			case *Call:
				// Clone a template that we can modify and use for new fields.
				template := CloneExpr(expr).(*Call)

				// Search for the call with a wildcard by continuously descending until
				// we no longer have a call.
				call := template
				for len(call.Args) > 0 {
					arg, ok := call.Args[0].(*Call)
					if !ok {
						break
					}
					call = arg
				}

				// Check if this field value is a wildcard.
				if len(call.Args) == 0 {
					rwFields = append(rwFields, f)
					continue
				}

				// Retrieve if this is a wildcard or a regular expression.
				var re *regexp.Regexp
				switch expr := call.Args[0].(type) {
				case *Wildcard:
					if expr.Type == TAG {
						return nil, fmt.Errorf("unable to use tag wildcard in %s()", call.Name)
					}
				case *RegexLiteral:
					re = expr.Val
				default:
					// First argument is a concrete expression; no expansion needed.
					rwFields = append(rwFields, f)
					continue
				}

				// All types that can expand wildcards support float, integer, and unsigned.
				supportedTypes := map[DataType]struct{}{
					Float:    {},
					Integer:  {},
					Unsigned: {},
				}

				// Add additional types for certain functions.
				switch call.Name {
				case "count", "first", "last", "distinct", "elapsed", "mode", "sample":
					supportedTypes[String] = struct{}{}
					fallthrough
				case "min", "max":
					supportedTypes[Boolean] = struct{}{}
				case "holt_winters", "holt_winters_with_fit":
					delete(supportedTypes, Unsigned)
				}

				for _, ref := range fields {
					// Do not expand tags within a function call. It likely won't do anything
					// anyway and will be the wrong thing in 99% of cases.
					if ref.Type == Tag {
						continue
					} else if _, ok := supportedTypes[ref.Type]; !ok {
						continue
					} else if re != nil && !re.MatchString(ref.Val) {
						continue
					}

					// Make a new expression and replace the wildcard within this cloned expression.
					call.Args[0] = &VarRef{Val: ref.Val, Type: ref.Type}
					rwFields = append(rwFields, &Field{
						Expr:  CloneExpr(template),
						Alias: fmt.Sprintf("%s_%s", f.Name(), ref.Val),
					})
				}
			case *BinaryExpr:
				// Search for regexes or wildcards within the binary
				// expression. If we find any, throw an error indicating that
				// it's illegal.
				var regex, wildcard bool
				WalkFunc(expr, func(n Node) {
					switch n.(type) {
					case *RegexLiteral:
						regex = true
					case *Wildcard:
						wildcard = true
					}
				})

				if wildcard {
					return nil, fmt.Errorf("unsupported expression with wildcard: %s", f.Expr)
				} else if regex {
					return nil, fmt.Errorf("unsupported expression with regex field: %s", f.Expr)
				}
				rwFields = append(rwFields, f)
			default:
				rwFields = append(rwFields, f)
			}
		}
		other.Fields = rwFields
	}

	// Rewrite all wildcard GROUP BY fields
	if hasDimensionWildcard {
		// Allocate a slice assuming there is exactly one wildcard for efficiency.
		rwDimensions := make(Dimensions, 0, len(other.Dimensions)+len(dimensions)-1)
		for _, d := range other.Dimensions {
			switch expr := d.Expr.(type) {
			case *Wildcard:
				for _, name := range dimensions {
					rwDimensions = append(rwDimensions, &Dimension{Expr: &VarRef{Val: name}})
				}
			case *RegexLiteral:
				for _, name := range dimensions {
					if expr.Val.MatchString(name) {
						rwDimensions = append(rwDimensions, &Dimension{Expr: &VarRef{Val: name}})
					}
				}
			default:
				rwDimensions = append(rwDimensions, d)
			}
		}
		other.Dimensions = rwDimensions
	}

	return other, nil
}

// RewriteRegexConditions rewrites regex conditions to make better use of the
// database index.
//
// Conditions that can currently be simplified are:
//
//   - host =~ /^foo$/       becomes host = 'foo'
//   - host !~ /^foo$/       becomes host != 'foo'
//
// Note: if the regex contains groups, character classes, repetition or
// similar, it's likely it won't be rewritten. In order to support rewriting
// regexes with these characters would be a lot more work.
func (s *SelectStatement) RewriteRegexConditions() {
	s.Condition = RewriteExpr(s.Condition, func(e Expr) Expr {
		be, ok := e.(*BinaryExpr)
		if !ok || (be.Op != EQREGEX && be.Op != NEQREGEX) {
			// This expression is not a binary condition or doesn't have a
			// regex based operator.
			return e
		}

		// Handle regex-based condition.
		rhs := be.RHS.(*RegexLiteral) // This must be a regex.
vals, ok := matchExactRegex(rhs.Val.String()) if !ok { // Regex didn't match. return e } // Update the condition operator. var concatOp Token if be.Op == EQREGEX { be.Op = EQ concatOp = OR } else { be.Op = NEQ concatOp = AND } // Remove leading and trailing ^ and $. switch { case len(vals) == 0: be.RHS = &StringLiteral{} case len(vals) == 1: be.RHS = &StringLiteral{Val: vals[0]} default: expr := &BinaryExpr{ Op: be.Op, LHS: be.LHS, RHS: &StringLiteral{Val: vals[0]}, } for i := 1; i < len(vals); i++ { expr = &BinaryExpr{ Op: concatOp, LHS: expr, RHS: &BinaryExpr{ Op: be.Op, LHS: be.LHS, RHS: &StringLiteral{Val: vals[i]}, }, } } return &ParenExpr{Expr: expr} } return be }) // Unwrap any top level parenthesis. if cond, ok := s.Condition.(*ParenExpr); ok { s.Condition = cond.Expr } } // matchExactRegex matches regexes into literals if possible. This will match the // pattern /^foo$/ or /^(foo|bar)$/. It considers /^$/ to be a matching regex. func matchExactRegex(v string) ([]string, bool) { re, err := syntax.Parse(v, syntax.Perl) if err != nil { // Nothing we can do or log. return nil, false } re = re.Simplify() if re.Op != syntax.OpConcat { return nil, false } if len(re.Sub) < 2 { // Regex has too few subexpressions. return nil, false } start := re.Sub[0] if !(start.Op == syntax.OpBeginLine || start.Op == syntax.OpBeginText) { // Regex does not begin with ^ return nil, false } end := re.Sub[len(re.Sub)-1] if !(end.Op == syntax.OpEndLine || end.Op == syntax.OpEndText) { // Regex does not end with $ return nil, false } // Remove the begin and end text from the regex. re.Sub = re.Sub[1 : len(re.Sub)-1] if len(re.Sub) == 0 { // The regex /^$/ return nil, true } return matchRegex(re) } // matchRegex will match a regular expression to literals if possible. func matchRegex(re *syntax.Regexp) ([]string, bool) { // Maximum number of literals that the expression should be expanded to. If // this is exceeded, no expansion will be done. 
This allows reasonable // optimizations of regex by expansion to literals but prevents cases // where that expansion would result in a large number of literals. const maxLiterals = 100 // Exit if we see a case-insensitive flag as it is not something we support at this time. if re.Flags&syntax.FoldCase != 0 { return nil, false } switch re.Op { case syntax.OpLiteral: // We can rewrite this regex. return []string{string(re.Rune)}, true case syntax.OpCapture: return matchRegex(re.Sub[0]) case syntax.OpConcat: // Go through each of the subs and concatenate the result to each one. names, ok := matchRegex(re.Sub[0]) if !ok { return nil, false } for _, sub := range re.Sub[1:] { vals, ok := matchRegex(sub) if !ok { return nil, false } // If there is only one value, concatenate it to all strings rather // than allocate a new slice. if len(vals) == 1 { for i := range names { names[i] += vals[0] } continue } else if len(names) == 1 { // If there is only one value, then do this concatenation in // the opposite direction. for i := range vals { vals[i] = names[0] + vals[i] } names = vals continue } sz := len(names) * len(vals) if sz > maxLiterals { return nil, false } // The long method of using multiple concatenations. concat := make([]string, sz) for i := range names { for j := range vals { concat[i*len(vals)+j] = names[i] + vals[j] } } names = concat } return names, true case syntax.OpCharClass: var sz int for i := 0; i < len(re.Rune); i += 2 { sz += int(re.Rune[i+1]) - int(re.Rune[i]) + 1 } if sz > maxLiterals { return nil, false } names := make([]string, 0, sz) for i := 0; i < len(re.Rune); i += 2 { for r := int(re.Rune[i]); r <= int(re.Rune[i+1]); r++ { names = append(names, string([]rune{rune(r)})) } } return names, true case syntax.OpAlternate: var names []string for _, sub := range re.Sub { vals, ok := matchRegex(sub) if !ok { return nil, false } names = append(names, vals...) 
} if len(names) > maxLiterals { return nil, false } return names, true } return nil, false } // RewriteDistinct rewrites the expression to be a call for map/reduce to work correctly. // This method assumes all validation has passed. func (s *SelectStatement) RewriteDistinct() { WalkFunc(s.Fields, func(n Node) { switch n := n.(type) { case *Field: if expr, ok := n.Expr.(*Distinct); ok { n.Expr = expr.NewCall() s.IsRawQuery = false } case *Call: for i, arg := range n.Args { if arg, ok := arg.(*Distinct); ok { n.Args[i] = arg.NewCall() } } } }) } // RewriteTimeFields removes any "time" field references. func (s *SelectStatement) RewriteTimeFields() { for i := 0; i < len(s.Fields); i++ { switch expr := s.Fields[i].Expr.(type) { case *VarRef: if expr.Val == "time" { s.TimeAlias = s.Fields[i].Alias s.Fields = append(s.Fields[:i], s.Fields[i+1:]...) } } } } // ColumnNames will walk all fields and functions and return the appropriate field names for the select statement // while maintaining order of the field names. func (s *SelectStatement) ColumnNames() []string { // First walk each field to determine the number of columns. columnFields := Fields{} for _, field := range s.Fields { columnFields = append(columnFields, field) switch f := field.Expr.(type) { case *Call: if s.Target == nil && (f.Name == "top" || f.Name == "bottom") { for _, arg := range f.Args[1:] { ref, ok := arg.(*VarRef) if ok { columnFields = append(columnFields, &Field{Expr: ref}) } } } } } // Determine if we should add an extra column for an implicit time. offset := 0 if !s.OmitTime { offset++ } columnNames := make([]string, len(columnFields)+offset) if !s.OmitTime { // Add the implicit time if requested. columnNames[0] = s.TimeFieldName() } // Keep track of the encountered column names. names := make(map[string]int) // Resolve aliases first. 
	// Explicit aliases claim their column slot (and name) before any
	// generated names are assigned, so generated names defer to them.
	for i, col := range columnFields {
		if col.Alias != "" {
			columnNames[i+offset] = col.Alias
			names[col.Alias] = 1
		}
	}

	// Resolve any generated names and resolve conflicts.
	for i, col := range columnFields {
		if columnNames[i+offset] != "" {
			// Already named by an alias above.
			continue
		}

		name := col.Name()
		count, conflict := names[name]
		if conflict {
			// Append a numeric suffix, probing upward until an unused
			// "<name>_<n>" is found; remember the next counter to try.
			for {
				resolvedName := fmt.Sprintf("%s_%d", name, count)
				_, conflict = names[resolvedName]
				if !conflict {
					names[name] = count + 1
					name = resolvedName
					break
				}
				count++
			}
		}
		names[name]++
		columnNames[i+offset] = name
	}
	return columnNames
}

// FieldExprByName returns the expression that matches the field name and the
// index where this was found. If the name matches one of the arguments to
// "top" or "bottom", the variable reference inside of the function is returned
// and the index is of the function call rather than the variable reference.
// If no expression is found, -1 is returned for the index and the expression
// will be nil.
func (s *SelectStatement) FieldExprByName(name string) (int, Expr) {
	for i, f := range s.Fields {
		if f.Name() == name {
			return i, f.Expr
		} else if call, ok := f.Expr.(*Call); ok && (call.Name == "top" || call.Name == "bottom") && len(call.Args) > 2 {
			// Check the tag arguments (everything between the field and
			// the trailing limit argument).
			for _, arg := range call.Args[1 : len(call.Args)-1] {
				if arg, ok := arg.(*VarRef); ok && arg.Val == name {
					return i, arg
				}
			}
		}
	}
	return -1, nil
}

// Reduce calls the Reduce function on the different components of the
// SelectStatement to reduce the statement.
func (s *SelectStatement) Reduce(valuer Valuer) *SelectStatement {
	// Work on a clone so the receiver is left untouched.
	stmt := s.Clone()
	stmt.Condition = Reduce(stmt.Condition, valuer)
	for _, d := range stmt.Dimensions {
		d.Expr = Reduce(d.Expr, valuer)
	}

	// Recursively reduce any subqueries.
	for _, source := range stmt.Sources {
		switch source := source.(type) {
		case *SubQuery:
			source.Statement = source.Statement.Reduce(valuer)
		}
	}
	return stmt
}

// String returns a string representation of the select statement.
func (s *SelectStatement) String() string { var buf bytes.Buffer _, _ = buf.WriteString("SELECT ") _, _ = buf.WriteString(s.Fields.String()) if s.Target != nil { _, _ = buf.WriteString(" ") _, _ = buf.WriteString(s.Target.String()) } if len(s.Sources) > 0 { _, _ = buf.WriteString(" FROM ") _, _ = buf.WriteString(s.Sources.String()) } if s.Condition != nil { _, _ = buf.WriteString(" WHERE ") _, _ = buf.WriteString(s.Condition.String()) } if len(s.Dimensions) > 0 { _, _ = buf.WriteString(" GROUP BY ") _, _ = buf.WriteString(s.Dimensions.String()) } switch s.Fill { case NoFill: _, _ = buf.WriteString(" fill(none)") case NumberFill: _, _ = buf.WriteString(fmt.Sprintf(" fill(%v)", s.FillValue)) case LinearFill: _, _ = buf.WriteString(" fill(linear)") case PreviousFill: _, _ = buf.WriteString(" fill(previous)") } if len(s.SortFields) > 0 { _, _ = buf.WriteString(" ORDER BY ") _, _ = buf.WriteString(s.SortFields.String()) } if s.Limit > 0 { _, _ = fmt.Fprintf(&buf, " LIMIT %d", s.Limit) } if s.Offset > 0 { _, _ = buf.WriteString(" OFFSET ") _, _ = buf.WriteString(strconv.Itoa(s.Offset)) } if s.SLimit > 0 { _, _ = fmt.Fprintf(&buf, " SLIMIT %d", s.SLimit) } if s.SOffset > 0 { _, _ = fmt.Fprintf(&buf, " SOFFSET %d", s.SOffset) } if s.Location != nil { _, _ = fmt.Fprintf(&buf, ` TZ('%s')`, s.Location) } return buf.String() } // RequiredPrivileges returns the privilege required to execute the SelectStatement. // NOTE: Statement should be normalized first (database name(s) in Sources and // Target should be populated). If the statement has not been normalized, an // empty string will be returned for the database name and it is up to the caller // to interpret that as the default database. 
func (s *SelectStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	ep, err := s.Sources.RequiredPrivileges()
	if err != nil {
		return nil, err
	}

	// Writing INTO a target additionally requires write on its database.
	if s.Target != nil {
		ep = append(ep, ExecutionPrivilege{Admin: false, Name: s.Target.Measurement.Database, Privilege: WritePrivilege})
	}
	return ep, nil
}

// HasWildcard returns whether or not the select statement has at least 1 wildcard.
func (s *SelectStatement) HasWildcard() bool {
	return s.HasFieldWildcard() || s.HasDimensionWildcard()
}

// HasFieldWildcard returns whether or not the select statement has at least 1 wildcard in the fields.
// Regex literals in the field list count as wildcards.
func (s *SelectStatement) HasFieldWildcard() (hasWildcard bool) {
	WalkFunc(s.Fields, func(n Node) {
		if hasWildcard {
			// Already found one; skip further inspection.
			return
		}
		switch n.(type) {
		case *Wildcard, *RegexLiteral:
			hasWildcard = true
		}
	})
	return hasWildcard
}

// HasDimensionWildcard returns whether or not the select statement has
// at least 1 wildcard in the dimensions aka `GROUP BY`.
// Regex literals in the dimensions count as wildcards.
func (s *SelectStatement) HasDimensionWildcard() bool {
	for _, d := range s.Dimensions {
		switch d.Expr.(type) {
		case *Wildcard, *RegexLiteral:
			return true
		}
	}

	return false
}

// GroupByInterval extracts the time interval, if specified.
// The result is cached on the statement after the first successful lookup.
func (s *SelectStatement) GroupByInterval() (time.Duration, error) {
	// return if we've already pulled it out
	if s.groupByInterval != 0 {
		return s.groupByInterval, nil
	}

	// Ignore if there are no dimensions.
	if len(s.Dimensions) == 0 {
		return 0, nil
	}

	for _, d := range s.Dimensions {
		if call, ok := d.Expr.(*Call); ok && call.Name == "time" {
			// Make sure there is exactly one argument.
			if got := len(call.Args); got < 1 || got > 2 {
				return 0, errors.New("time dimension expected 1 or 2 arguments")
			}

			// Ensure the argument is a duration.
			lit, ok := call.Args[0].(*DurationLiteral)
			if !ok {
				return 0, errors.New("time dimension must have duration argument")
			}
			s.groupByInterval = lit.Val
			return lit.Val, nil
		}
	}
	return 0, nil
}

// GroupByOffset extracts the time interval offset, if specified.
func (s *SelectStatement) GroupByOffset() (time.Duration, error) {
	interval, err := s.GroupByInterval()
	if err != nil {
		return 0, err
	}

	// Ignore if there are no dimensions.
	if len(s.Dimensions) == 0 {
		return 0, nil
	}

	for _, d := range s.Dimensions {
		if call, ok := d.Expr.(*Call); ok && call.Name == "time" {
			// The optional second argument to time() shifts the buckets.
			if len(call.Args) == 2 {
				switch expr := call.Args[1].(type) {
				case *DurationLiteral:
					// Normalize the offset into [0, interval).
					return expr.Val % interval, nil
				case *TimeLiteral:
					// An absolute time offsets buckets by its distance from
					// the interval boundary it falls within.
					return expr.Val.Sub(expr.Val.Truncate(interval)), nil
				default:
					return 0, fmt.Errorf("invalid time dimension offset: %s", expr)
				}
			}
			return 0, nil
		}
	}
	return 0, nil
}

// SetTimeRange sets the start and end time of the select statement to [start, end). i.e. start inclusive, end exclusive.
// This is used commonly for continuous queries so the start and end are in buckets.
func (s *SelectStatement) SetTimeRange(start, end time.Time) error {
	cond := fmt.Sprintf("time >= '%s' AND time < '%s'", start.UTC().Format(time.RFC3339Nano), end.UTC().Format(time.RFC3339Nano))
	if s.Condition != nil {
		// Preserve the existing condition minus any prior time clauses.
		cond = fmt.Sprintf("%s AND %s", s.rewriteWithoutTimeDimensions(), cond)
	}

	// Re-parse the combined condition back into an expression tree.
	expr, err := NewParser(strings.NewReader(cond)).ParseExpr()
	if err != nil {
		return err
	}

	// Fold out any previously replaced time dimensions and set the condition.
	s.Condition = Reduce(expr, nil)

	return nil
}

// rewriteWithoutTimeDimensions will remove any WHERE time... clauses from the select statement.
// This is necessary when setting an explicit time range to override any that previously existed.
func (s *SelectStatement) rewriteWithoutTimeDimensions() string {
	n := RewriteFunc(s.Condition, func(n Node) Node {
		switch n := n.(type) {
		case *BinaryExpr:
			// Replace any comparison whose left side is "time" with TRUE so
			// the surrounding boolean structure is preserved.
			if n.LHS.String() == "time" {
				return &BooleanLiteral{Val: true}
			}
			return n
		case *Call:
			// Function calls are replaced with TRUE as well.
			return &BooleanLiteral{Val: true}
		default:
			return n
		}
	})

	return n.String()
}

// encodeMeasurement converts a Measurement into its protobuf representation.
func encodeMeasurement(mm *Measurement) *internal.Measurement {
	pb := &internal.Measurement{
		Database:        proto.String(mm.Database),
		RetentionPolicy: proto.String(mm.RetentionPolicy),
		Name:            proto.String(mm.Name),
		IsTarget:        proto.Bool(mm.IsTarget),
	}
	// The regex is serialized as its source string; only set when present.
	if mm.Regex != nil {
		pb.Regex = proto.String(mm.Regex.Val.String())
	}
	return pb
}

// decodeMeasurement converts a protobuf Measurement back into a Measurement.
// It returns an error if the serialized regex no longer compiles.
func decodeMeasurement(pb *internal.Measurement) (*Measurement, error) {
	mm := &Measurement{
		Database:        pb.GetDatabase(),
		RetentionPolicy: pb.GetRetentionPolicy(),
		Name:            pb.GetName(),
		IsTarget:        pb.GetIsTarget(),
	}

	if pb.Regex != nil {
		regex, err := regexp.Compile(pb.GetRegex())
		if err != nil {
			return nil, fmt.Errorf("invalid binary measurement regex: value=%q, err=%s", pb.GetRegex(), err)
		}
		mm.Regex = &RegexLiteral{Val: regex}
	}

	return mm, nil
}

// walkNames will walk the Expr and return the identifier names used.
// Note that for calls, only direct VarRef arguments are collected (no
// recursion into nested call arguments).
func walkNames(exp Expr) []string {
	switch expr := exp.(type) {
	case *VarRef:
		return []string{expr.Val}
	case *Call:
		var a []string
		for _, expr := range expr.Args {
			if ref, ok := expr.(*VarRef); ok {
				a = append(a, ref.Val)
			}
		}
		return a
	case *BinaryExpr:
		var ret []string
		ret = append(ret, walkNames(expr.LHS)...)
		ret = append(ret, walkNames(expr.RHS)...)
		return ret
	case *ParenExpr:
		return walkNames(expr.Expr)
	}

	return nil
}

// walkRefs will walk the Expr and return the var refs used.
// walkRefs collects the distinct VarRefs used in the expression. The result
// is deduplicated via a map, so its order is unspecified; callers that need
// a stable order must sort (see ExprNames).
func walkRefs(exp Expr) []VarRef {
	refs := make(map[VarRef]struct{})
	var walk func(exp Expr)
	walk = func(exp Expr) {
		switch expr := exp.(type) {
		case *VarRef:
			refs[*expr] = struct{}{}
		case *Call:
			// Only direct VarRef arguments are collected; nested calls are
			// not descended into.
			for _, expr := range expr.Args {
				if ref, ok := expr.(*VarRef); ok {
					refs[*ref] = struct{}{}
				}
			}
		case *BinaryExpr:
			walk(expr.LHS)
			walk(expr.RHS)
		case *ParenExpr:
			walk(expr.Expr)
		}
	}
	walk(exp)

	// Turn the map into a slice.
	a := make([]VarRef, 0, len(refs))
	for ref := range refs {
		a = append(a, ref)
	}
	return a
}

// ExprNames returns a list of non-"time" field names from an expression.
// The result is sorted and deduplicated.
func ExprNames(expr Expr) []VarRef {
	m := make(map[VarRef]struct{})
	for _, ref := range walkRefs(expr) {
		if ref.Val == "time" {
			continue
		}
		m[ref] = struct{}{}
	}

	a := make([]VarRef, 0, len(m))
	for k := range m {
		a = append(a, k)
	}
	sort.Sort(VarRefs(a))

	return a
}

// Target represents a target (destination) policy, measurement, and DB.
type Target struct {
	// Measurement to write into.
	Measurement *Measurement
}

// String returns a string representation of the Target.
func (t *Target) String() string {
	if t == nil {
		return ""
	}

	var buf bytes.Buffer
	_, _ = buf.WriteString("INTO ")
	_, _ = buf.WriteString(t.Measurement.String())
	// A blank measurement name is rendered as the :MEASUREMENT placeholder.
	if t.Measurement.Name == "" {
		_, _ = buf.WriteString(":MEASUREMENT")
	}

	return buf.String()
}

// ExplainStatement represents a command for explaining a select statement.
type ExplainStatement struct {
	// Statement is the SELECT query being explained.
	Statement *SelectStatement

	// Analyze, when true, renders EXPLAIN ANALYZE.
	Analyze bool
}

// String returns a string representation of the explain statement.
func (e *ExplainStatement) String() string {
	var buf bytes.Buffer
	buf.WriteString("EXPLAIN ")
	if e.Analyze {
		buf.WriteString("ANALYZE ")
	}
	buf.WriteString(e.Statement.String())
	return buf.String()
}

// RequiredPrivileges returns the privilege required to execute a ExplainStatement.
// Explaining a statement requires the same privileges as running it.
func (e *ExplainStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	return e.Statement.RequiredPrivileges()
}

// DeleteStatement represents a command for deleting data from the database.
type DeleteStatement struct {
	// Data source that values are removed from.
	Source Source

	// An expression evaluated on data point.
	Condition Expr
}

// String returns a string representation of the delete statement.
func (s *DeleteStatement) String() string {
	var buf bytes.Buffer
	_, _ = buf.WriteString("DELETE FROM ")
	_, _ = buf.WriteString(s.Source.String())
	if s.Condition != nil {
		_, _ = buf.WriteString(" WHERE ")
		_, _ = buf.WriteString(s.Condition.String())
	}
	return buf.String()
}

// RequiredPrivileges returns the privilege required to execute a DeleteStatement.
func (s *DeleteStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	return ExecutionPrivileges{{Admin: false, Name: "", Privilege: WritePrivilege}}, nil
}

// DefaultDatabase returns the default database from the statement.
// It is blank when the source is not a plain measurement.
func (s *DeleteStatement) DefaultDatabase() string {
	if m, ok := s.Source.(*Measurement); ok {
		return m.Database
	}
	return ""
}

// ShowSeriesStatement represents a command for listing series in the database.
type ShowSeriesStatement struct {
	// Database to query. If blank, use the default database.
	// The database can also be specified per source in the Sources.
	Database string

	// Measurement(s) the series are listed for.
	Sources Sources

	// An expression evaluated on a series name or tag.
	Condition Expr

	// Fields to sort results by
	SortFields SortFields

	// Maximum number of rows to be returned.
	// Unlimited if zero.
	Limit int

	// Returns rows starting at an offset from the first row.
	Offset int
}

// String returns a string representation of the list series statement.
func (s *ShowSeriesStatement) String() string { var buf bytes.Buffer _, _ = buf.WriteString("SHOW SERIES") if s.Database != "" { _, _ = buf.WriteString(" ON ") _, _ = buf.WriteString(QuoteIdent(s.Database)) } if s.Sources != nil { _, _ = buf.WriteString(" FROM ") _, _ = buf.WriteString(s.Sources.String()) } if s.Condition != nil { _, _ = buf.WriteString(" WHERE ") _, _ = buf.WriteString(s.Condition.String()) } if len(s.SortFields) > 0 { _, _ = buf.WriteString(" ORDER BY ") _, _ = buf.WriteString(s.SortFields.String()) } if s.Limit > 0 { _, _ = buf.WriteString(" LIMIT ") _, _ = buf.WriteString(strconv.Itoa(s.Limit)) } if s.Offset > 0 { _, _ = buf.WriteString(" OFFSET ") _, _ = buf.WriteString(strconv.Itoa(s.Offset)) } return buf.String() } // RequiredPrivileges returns the privilege required to execute a ShowSeriesStatement. func (s *ShowSeriesStatement) RequiredPrivileges() (ExecutionPrivileges, error) { return ExecutionPrivileges{{Admin: false, Name: s.Database, Privilege: ReadPrivilege}}, nil } // DefaultDatabase returns the default database from the statement. func (s *ShowSeriesStatement) DefaultDatabase() string { return s.Database } // DropSeriesStatement represents a command for removing a series from the database. type DropSeriesStatement struct { // Data source that fields are extracted from (optional) Sources Sources // An expression evaluated on data point (optional) Condition Expr } // String returns a string representation of the drop series statement. func (s *DropSeriesStatement) String() string { var buf bytes.Buffer buf.WriteString("DROP SERIES") if s.Sources != nil { buf.WriteString(" FROM ") buf.WriteString(s.Sources.String()) } if s.Condition != nil { buf.WriteString(" WHERE ") buf.WriteString(s.Condition.String()) } return buf.String() } // RequiredPrivileges returns the privilege required to execute a DropSeriesStatement. 
func (s DropSeriesStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	return ExecutionPrivileges{{Admin: false, Name: "", Privilege: WritePrivilege}}, nil
}

// DeleteSeriesStatement represents a command for deleting all or part of a series from a database.
type DeleteSeriesStatement struct {
	// Data source that fields are extracted from (optional)
	Sources Sources

	// An expression evaluated on data point (optional)
	Condition Expr
}

// String returns a string representation of the delete series statement.
func (s *DeleteSeriesStatement) String() string {
	var buf bytes.Buffer
	buf.WriteString("DELETE")

	if s.Sources != nil {
		buf.WriteString(" FROM ")
		buf.WriteString(s.Sources.String())
	}
	if s.Condition != nil {
		buf.WriteString(" WHERE ")
		buf.WriteString(s.Condition.String())
	}

	return buf.String()
}

// RequiredPrivileges returns the privilege required to execute a DeleteSeriesStatement.
func (s DeleteSeriesStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	return ExecutionPrivileges{{Admin: false, Name: "", Privilege: WritePrivilege}}, nil
}

// DropShardStatement represents a command for removing a shard from
// the node.
type DropShardStatement struct {
	// ID of the shard to be dropped.
	ID uint64
}

// String returns a string representation of the drop shard statement.
func (s *DropShardStatement) String() string {
	var buf bytes.Buffer
	buf.WriteString("DROP SHARD ")
	buf.WriteString(strconv.FormatUint(s.ID, 10))
	return buf.String()
}

// RequiredPrivileges returns the privilege required to execute a
// DropShardStatement.
func (s *DropShardStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil
}

// ShowSeriesCardinalityStatement represents a command for listing series cardinality.
type ShowSeriesCardinalityStatement struct {
	// Database to query. If blank, use the default database.
	// The database can also be specified per source in the Sources.
	Database string

	// Specifies whether the user requires exact counting or not.
	Exact bool

	// Measurement(s) the series are listed for.
	Sources Sources

	// An expression evaluated on a series name or tag.
	Condition Expr

	// Expressions used for grouping the selection.
	Dimensions Dimensions

	// Maximum rows to return and number of rows to skip. Zero means unset.
	Limit, Offset int
}

// String returns a string representation of the show series cardinality statement.
func (s *ShowSeriesCardinalityStatement) String() string {
	var buf bytes.Buffer
	_, _ = buf.WriteString("SHOW SERIES")

	// EXACT appears between SERIES and CARDINALITY when exact counting is requested.
	if s.Exact {
		_, _ = buf.WriteString(" EXACT")
	}
	_, _ = buf.WriteString(" CARDINALITY")

	if s.Database != "" {
		_, _ = buf.WriteString(" ON ")
		_, _ = buf.WriteString(QuoteIdent(s.Database))
	}
	if s.Sources != nil {
		_, _ = buf.WriteString(" FROM ")
		_, _ = buf.WriteString(s.Sources.String())
	}
	if s.Condition != nil {
		_, _ = buf.WriteString(" WHERE ")
		_, _ = buf.WriteString(s.Condition.String())
	}
	if len(s.Dimensions) > 0 {
		_, _ = buf.WriteString(" GROUP BY ")
		_, _ = buf.WriteString(s.Dimensions.String())
	}
	if s.Limit > 0 {
		_, _ = fmt.Fprintf(&buf, " LIMIT %d", s.Limit)
	}
	if s.Offset > 0 {
		_, _ = buf.WriteString(" OFFSET ")
		_, _ = buf.WriteString(strconv.Itoa(s.Offset))
	}
	return buf.String()
}

// RequiredPrivileges returns the privilege required to execute a ShowSeriesCardinalityStatement.
func (s *ShowSeriesCardinalityStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	// Estimated cardinality only needs read on the database; exact counting
	// defers to the privileges required by the sources.
	if !s.Exact {
		return ExecutionPrivileges{{Admin: false, Name: s.Database, Privilege: ReadPrivilege}}, nil
	}
	return s.Sources.RequiredPrivileges()
}

// DefaultDatabase returns the default database from the statement.
func (s *ShowSeriesCardinalityStatement) DefaultDatabase() string {
	return s.Database
}

// ShowContinuousQueriesStatement represents a command for listing continuous queries.
type ShowContinuousQueriesStatement struct{}

// String returns a string representation of the show continuous queries statement.
func (s *ShowContinuousQueriesStatement) String() string { return "SHOW CONTINUOUS QUERIES" }

// RequiredPrivileges returns the privilege required to execute a ShowContinuousQueriesStatement.
func (s *ShowContinuousQueriesStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}}, nil
}

// ShowGrantsForUserStatement represents a command for listing user privileges.
type ShowGrantsForUserStatement struct {
	// Name of the user to display privileges.
	Name string
}

// String returns a string representation of the show grants for user.
func (s *ShowGrantsForUserStatement) String() string {
	var buf bytes.Buffer
	_, _ = buf.WriteString("SHOW GRANTS FOR ")
	_, _ = buf.WriteString(QuoteIdent(s.Name))

	return buf.String()
}

// RequiredPrivileges returns the privilege required to execute a ShowGrantsForUserStatement.
func (s *ShowGrantsForUserStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil
}

// ShowDatabasesStatement represents a command for listing all databases in the cluster.
type ShowDatabasesStatement struct{}

// String returns a string representation of the show databases command.
func (s *ShowDatabasesStatement) String() string { return "SHOW DATABASES" }

// RequiredPrivileges returns the privilege required to execute a ShowDatabasesStatement.
func (s *ShowDatabasesStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	// SHOW DATABASES is one of few statements that have no required privileges.
	// Anyone is allowed to execute it, but the returned results depend on the user's
	// individual database permissions.
	return ExecutionPrivileges{{Admin: false, Name: "", Privilege: NoPrivileges}}, nil
}

// CreateContinuousQueryStatement represents a command for creating a continuous query.
type CreateContinuousQueryStatement struct {
	// Name of the continuous query to be created.
	Name string

	// Name of the database to create the continuous query on.
	Database string

	// Source of data (SELECT statement).
	Source *SelectStatement

	// Interval to resample previous queries.
	ResampleEvery time.Duration

	// Maximum duration to resample previous queries.
	ResampleFor time.Duration
}

// String returns a string representation of the statement.
func (s *CreateContinuousQueryStatement) String() string {
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "CREATE CONTINUOUS QUERY %s ON %s ", QuoteIdent(s.Name), QuoteIdent(s.Database))

	// The RESAMPLE clause is only rendered when either option is set.
	if s.ResampleEvery > 0 || s.ResampleFor > 0 {
		buf.WriteString("RESAMPLE ")
		if s.ResampleEvery > 0 {
			fmt.Fprintf(&buf, "EVERY %s ", FormatDuration(s.ResampleEvery))
		}
		if s.ResampleFor > 0 {
			fmt.Fprintf(&buf, "FOR %s ", FormatDuration(s.ResampleFor))
		}
	}
	fmt.Fprintf(&buf, "BEGIN %s END", s.Source.String())
	return buf.String()
}

// DefaultDatabase returns the default database from the statement.
func (s *CreateContinuousQueryStatement) DefaultDatabase() string {
	return s.Database
}

// RequiredPrivileges returns the privilege required to execute a CreateContinuousQueryStatement.
func (s *CreateContinuousQueryStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	ep := ExecutionPrivileges{{Admin: false, Name: s.Database, Privilege: ReadPrivilege}}

	// Selecting into a database that's different from the source?
	if s.Source.Target.Measurement.Database != "" {
		// Change source database privilege requirement to read.
		// NOTE(review): ep[0].Privilege is already ReadPrivilege, so this
		// assignment is a no-op — confirm whether a different privilege
		// was intended here.
		ep[0].Privilege = ReadPrivilege

		// Add destination database privilege requirement and set it to write.
		p := ExecutionPrivilege{
			Admin:     false,
			Name:      s.Source.Target.Measurement.Database,
			Privilege: WritePrivilege,
		}
		ep = append(ep, p)
	}

	return ep, nil
}

// validate checks the resample options against the query's GROUP BY interval:
// when a FOR duration is set, it must cover at least one full resample window.
func (s *CreateContinuousQueryStatement) validate() error {
	interval, err := s.Source.GroupByInterval()
	if err != nil {
		return err
	}

	if s.ResampleFor != 0 {
		// The effective window is the larger of EVERY and the GROUP BY interval.
		if s.ResampleEvery != 0 && s.ResampleEvery > interval {
			interval = s.ResampleEvery
		}
		if interval > s.ResampleFor {
			return fmt.Errorf("FOR duration must be >= GROUP BY time duration: must be a minimum of %s, got %s", FormatDuration(interval), FormatDuration(s.ResampleFor))
		}
	}
	return nil
}

// DropContinuousQueryStatement represents a command for removing a continuous query.
type DropContinuousQueryStatement struct {
	// Name of the continuous query and the database it belongs to.
	Name     string
	Database string
}

// String returns a string representation of the statement.
func (s *DropContinuousQueryStatement) String() string {
	return fmt.Sprintf("DROP CONTINUOUS QUERY %s ON %s", QuoteIdent(s.Name), QuoteIdent(s.Database))
}

// RequiredPrivileges returns the privilege(s) required to execute a DropContinuousQueryStatement
func (s *DropContinuousQueryStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	return ExecutionPrivileges{{Admin: false, Name: s.Database, Privilege: WritePrivilege}}, nil
}

// DefaultDatabase returns the default database from the statement.
func (s *DropContinuousQueryStatement) DefaultDatabase() string {
	return s.Database
}

// ShowMeasurementCardinalityStatement represents a command for listing measurement cardinality.
type ShowMeasurementCardinalityStatement struct {
	Exact         bool // If false then cardinality estimation will be used.
	Database      string
	Sources       Sources
	Condition     Expr
	Dimensions    Dimensions
	Limit, Offset int
}

// String returns a string representation of the statement.
func (s *ShowMeasurementCardinalityStatement) String() string {
	var buf bytes.Buffer
	_, _ = buf.WriteString("SHOW MEASUREMENT")
	if s.Exact {
		_, _ = buf.WriteString(" EXACT")
	}
	_, _ = buf.WriteString(" CARDINALITY")

	// Each optional clause is emitted only when set.
	if s.Database != "" {
		_, _ = buf.WriteString(" ON ")
		_, _ = buf.WriteString(QuoteIdent(s.Database))
	}
	if s.Sources != nil {
		_, _ = buf.WriteString(" FROM ")
		_, _ = buf.WriteString(s.Sources.String())
	}
	if s.Condition != nil {
		_, _ = buf.WriteString(" WHERE ")
		_, _ = buf.WriteString(s.Condition.String())
	}
	if len(s.Dimensions) > 0 {
		_, _ = buf.WriteString(" GROUP BY ")
		_, _ = buf.WriteString(s.Dimensions.String())
	}
	if s.Limit > 0 {
		_, _ = fmt.Fprintf(&buf, " LIMIT %d", s.Limit)
	}
	if s.Offset > 0 {
		_, _ = buf.WriteString(" OFFSET ")
		_, _ = buf.WriteString(strconv.Itoa(s.Offset))
	}
	return buf.String()
}

// RequiredPrivileges returns the privilege required to execute a ShowMeasurementCardinalityStatement.
func (s *ShowMeasurementCardinalityStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	// Estimated cardinality only needs read access to the database;
	// exact cardinality defers to the privileges of the sources read.
	if !s.Exact {
		return ExecutionPrivileges{{Admin: false, Name: s.Database, Privilege: ReadPrivilege}}, nil
	}
	return s.Sources.RequiredPrivileges()
}

// DefaultDatabase returns the default database from the statement.
func (s *ShowMeasurementCardinalityStatement) DefaultDatabase() string {
	return s.Database
}

// ShowMeasurementsStatement represents a command for listing measurements.
type ShowMeasurementsStatement struct {
	// Database to query. If blank, use the default database.
	Database string

	// Measurement name or regex.
	Source Source

	// An expression evaluated on data point.
	Condition Expr

	// Fields to sort results by.
	SortFields SortFields

	// Maximum number of rows to be returned.
	// Unlimited if zero.
	Limit int

	// Returns rows starting at an offset from the first row.
	Offset int
}

// String returns a string representation of the statement.
func (s *ShowMeasurementsStatement) String() string { var buf bytes.Buffer _, _ = buf.WriteString("SHOW MEASUREMENTS") if s.Database != "" { _, _ = buf.WriteString(" ON ") _, _ = buf.WriteString(s.Database) } if s.Source != nil { _, _ = buf.WriteString(" WITH MEASUREMENT ") if m, ok := s.Source.(*Measurement); ok && m.Regex != nil { _, _ = buf.WriteString("=~ ") } else { _, _ = buf.WriteString("= ") } _, _ = buf.WriteString(s.Source.String()) } if s.Condition != nil { _, _ = buf.WriteString(" WHERE ") _, _ = buf.WriteString(s.Condition.String()) } if len(s.SortFields) > 0 { _, _ = buf.WriteString(" ORDER BY ") _, _ = buf.WriteString(s.SortFields.String()) } if s.Limit > 0 { _, _ = buf.WriteString(" LIMIT ") _, _ = buf.WriteString(strconv.Itoa(s.Limit)) } if s.Offset > 0 { _, _ = buf.WriteString(" OFFSET ") _, _ = buf.WriteString(strconv.Itoa(s.Offset)) } return buf.String() } // RequiredPrivileges returns the privilege(s) required to execute a ShowMeasurementsStatement. func (s *ShowMeasurementsStatement) RequiredPrivileges() (ExecutionPrivileges, error) { return ExecutionPrivileges{{Admin: false, Name: s.Database, Privilege: ReadPrivilege}}, nil } // DefaultDatabase returns the default database from the statement. func (s *ShowMeasurementsStatement) DefaultDatabase() string { return s.Database } // DropMeasurementStatement represents a command to drop a measurement. type DropMeasurementStatement struct { // Name of the measurement to be dropped. Name string } // String returns a string representation of the drop measurement statement. 
func (s *DropMeasurementStatement) String() string {
	var buf bytes.Buffer
	_, _ = buf.WriteString("DROP MEASUREMENT ")
	_, _ = buf.WriteString(QuoteIdent(s.Name))
	return buf.String()
}

// RequiredPrivileges returns the privilege(s) required to execute a DropMeasurementStatement.
func (s *DropMeasurementStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil
}

// ShowQueriesStatement represents a command for listing all running queries.
type ShowQueriesStatement struct{}

// String returns a string representation of the show queries statement.
func (s *ShowQueriesStatement) String() string {
	return "SHOW QUERIES"
}

// RequiredPrivileges returns the privilege required to execute a ShowQueriesStatement.
func (s *ShowQueriesStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	return ExecutionPrivileges{{Admin: false, Name: "", Privilege: ReadPrivilege}}, nil
}

// ShowRetentionPoliciesStatement represents a command for listing retention policies.
type ShowRetentionPoliciesStatement struct {
	// Name of the database to list policies for.
	Database string
}

// String returns a string representation of a ShowRetentionPoliciesStatement.
func (s *ShowRetentionPoliciesStatement) String() string {
	var buf bytes.Buffer
	_, _ = buf.WriteString("SHOW RETENTION POLICIES")
	if s.Database != "" {
		_, _ = buf.WriteString(" ON ")
		_, _ = buf.WriteString(QuoteIdent(s.Database))
	}
	return buf.String()
}

// RequiredPrivileges returns the privilege(s) required to execute a ShowRetentionPoliciesStatement.
func (s *ShowRetentionPoliciesStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	return ExecutionPrivileges{{Admin: false, Name: s.Database, Privilege: ReadPrivilege}}, nil
}

// DefaultDatabase returns the default database from the statement.
func (s *ShowRetentionPoliciesStatement) DefaultDatabase() string {
	return s.Database
}

// ShowStatsStatement displays statistics for a given module.
type ShowStatsStatement struct {
	// Module to show stats for (emitted as the FOR clause when non-empty).
	Module string
}

// String returns a string representation of a ShowStatsStatement.
func (s *ShowStatsStatement) String() string {
	var buf bytes.Buffer
	_, _ = buf.WriteString("SHOW STATS")
	if s.Module != "" {
		_, _ = buf.WriteString(" FOR ")
		_, _ = buf.WriteString(QuoteString(s.Module))
	}
	return buf.String()
}

// RequiredPrivileges returns the privilege(s) required to execute a ShowStatsStatement.
func (s *ShowStatsStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil
}

// ShowShardGroupsStatement represents a command for displaying shard groups in the cluster.
type ShowShardGroupsStatement struct{}

// String returns a string representation of the SHOW SHARD GROUPS command.
func (s *ShowShardGroupsStatement) String() string { return "SHOW SHARD GROUPS" }

// RequiredPrivileges returns the privileges required to execute the statement.
func (s *ShowShardGroupsStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil
}

// ShowShardsStatement represents a command for displaying shards in the cluster.
type ShowShardsStatement struct{}

// String returns a string representation.
func (s *ShowShardsStatement) String() string { return "SHOW SHARDS" }

// RequiredPrivileges returns the privileges required to execute the statement.
func (s *ShowShardsStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil
}

// ShowDiagnosticsStatement represents a command for show node diagnostics.
type ShowDiagnosticsStatement struct {
	// Module to show diagnostics for (emitted as the FOR clause when non-empty).
	Module string
}

// String returns a string representation of the ShowDiagnosticsStatement.
func (s *ShowDiagnosticsStatement) String() string {
	var buf bytes.Buffer
	_, _ = buf.WriteString("SHOW DIAGNOSTICS")
	if s.Module != "" {
		_, _ = buf.WriteString(" FOR ")
		_, _ = buf.WriteString(QuoteString(s.Module))
	}
	return buf.String()
}

// RequiredPrivileges returns the privilege required to execute a ShowDiagnosticsStatement.
func (s *ShowDiagnosticsStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil
}

// CreateSubscriptionStatement represents a command to add a subscription to the incoming data stream.
type CreateSubscriptionStatement struct {
	Name            string
	Database        string
	RetentionPolicy string
	Destinations    []string
	Mode            string
}

// String returns a string representation of the CreateSubscriptionStatement.
func (s *CreateSubscriptionStatement) String() string {
	var buf bytes.Buffer
	_, _ = buf.WriteString("CREATE SUBSCRIPTION ")
	_, _ = buf.WriteString(QuoteIdent(s.Name))
	_, _ = buf.WriteString(" ON ")
	_, _ = buf.WriteString(QuoteIdent(s.Database))
	_, _ = buf.WriteString(".")
	_, _ = buf.WriteString(QuoteIdent(s.RetentionPolicy))
	_, _ = buf.WriteString(" DESTINATIONS ")
	_, _ = buf.WriteString(s.Mode)
	_, _ = buf.WriteString(" ")
	// Destinations are URLs, so they are quoted as strings, not identifiers.
	for i, dest := range s.Destinations {
		if i != 0 {
			_, _ = buf.WriteString(", ")
		}
		_, _ = buf.WriteString(QuoteString(dest))
	}
	return buf.String()
}

// RequiredPrivileges returns the privilege required to execute a CreateSubscriptionStatement.
func (s *CreateSubscriptionStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil
}

// DefaultDatabase returns the default database from the statement.
func (s *CreateSubscriptionStatement) DefaultDatabase() string {
	return s.Database
}

// DropSubscriptionStatement represents a command to drop a subscription to the incoming data stream.
type DropSubscriptionStatement struct {
	Name            string
	Database        string
	RetentionPolicy string
}

// String returns a string representation of the DropSubscriptionStatement.
func (s *DropSubscriptionStatement) String() string {
	return fmt.Sprintf(`DROP SUBSCRIPTION %s ON %s.%s`, QuoteIdent(s.Name), QuoteIdent(s.Database), QuoteIdent(s.RetentionPolicy))
}

// RequiredPrivileges returns the privilege required to execute a DropSubscriptionStatement.
func (s *DropSubscriptionStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil
}

// DefaultDatabase returns the default database from the statement.
func (s *DropSubscriptionStatement) DefaultDatabase() string {
	return s.Database
}

// ShowSubscriptionsStatement represents a command to show a list of subscriptions.
type ShowSubscriptionsStatement struct {
}

// String returns a string representation of the ShowSubscriptionsStatement.
func (s *ShowSubscriptionsStatement) String() string {
	return "SHOW SUBSCRIPTIONS"
}

// RequiredPrivileges returns the privilege required to execute a ShowSubscriptionsStatement.
func (s *ShowSubscriptionsStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil
}

// ShowTagKeysStatement represents a command for listing tag keys.
type ShowTagKeysStatement struct {
	// Database to query. If blank, use the default database.
	// The database can also be specified per source in the Sources.
	Database string

	// Data sources that fields are extracted from.
	Sources Sources

	// An expression evaluated on data point.
	Condition Expr

	// Fields to sort results by.
	SortFields SortFields

	// Maximum number of tag keys per measurement. Unlimited if zero.
	Limit int

	// Returns tag keys starting at an offset from the first row.
	Offset int

	// Maximum number of series to be returned. Unlimited if zero.
	SLimit int

	// Returns series starting at an offset from the first one.
	SOffset int
}

// String returns a string representation of the statement.
func (s *ShowTagKeysStatement) String() string {
	var buf bytes.Buffer
	_, _ = buf.WriteString("SHOW TAG KEYS")

	if s.Database != "" {
		_, _ = buf.WriteString(" ON ")
		_, _ = buf.WriteString(QuoteIdent(s.Database))
	}
	if s.Sources != nil {
		_, _ = buf.WriteString(" FROM ")
		_, _ = buf.WriteString(s.Sources.String())
	}
	if s.Condition != nil {
		_, _ = buf.WriteString(" WHERE ")
		_, _ = buf.WriteString(s.Condition.String())
	}
	if len(s.SortFields) > 0 {
		_, _ = buf.WriteString(" ORDER BY ")
		_, _ = buf.WriteString(s.SortFields.String())
	}
	if s.Limit > 0 {
		_, _ = buf.WriteString(" LIMIT ")
		_, _ = buf.WriteString(strconv.Itoa(s.Limit))
	}
	if s.Offset > 0 {
		_, _ = buf.WriteString(" OFFSET ")
		_, _ = buf.WriteString(strconv.Itoa(s.Offset))
	}
	if s.SLimit > 0 {
		_, _ = buf.WriteString(" SLIMIT ")
		_, _ = buf.WriteString(strconv.Itoa(s.SLimit))
	}
	if s.SOffset > 0 {
		_, _ = buf.WriteString(" SOFFSET ")
		_, _ = buf.WriteString(strconv.Itoa(s.SOffset))
	}
	return buf.String()
}

// RequiredPrivileges returns the privilege(s) required to execute a ShowTagKeysStatement.
func (s *ShowTagKeysStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	return ExecutionPrivileges{{Admin: false, Name: s.Database, Privilege: ReadPrivilege}}, nil
}

// DefaultDatabase returns the default database from the statement.
func (s *ShowTagKeysStatement) DefaultDatabase() string {
	return s.Database
}

// ShowTagKeyCardinalityStatement represents a command for listing tag key cardinality.
type ShowTagKeyCardinalityStatement struct {
	Database      string
	Exact         bool
	Sources       Sources
	Condition     Expr
	Dimensions    Dimensions
	Limit, Offset int
}

// String returns a string representation of the statement.
func (s *ShowTagKeyCardinalityStatement) String() string {
	var buf bytes.Buffer
	_, _ = buf.WriteString("SHOW TAG KEY ")
	if s.Exact {
		_, _ = buf.WriteString("EXACT ")
	}
	_, _ = buf.WriteString("CARDINALITY")

	if s.Database != "" {
		_, _ = buf.WriteString(" ON ")
		_, _ = buf.WriteString(QuoteIdent(s.Database))
	}
	if s.Sources != nil {
		_, _ = buf.WriteString(" FROM ")
		_, _ = buf.WriteString(s.Sources.String())
	}
	if s.Condition != nil {
		_, _ = buf.WriteString(" WHERE ")
		_, _ = buf.WriteString(s.Condition.String())
	}
	if len(s.Dimensions) > 0 {
		_, _ = buf.WriteString(" GROUP BY ")
		_, _ = buf.WriteString(s.Dimensions.String())
	}
	if s.Limit > 0 {
		_, _ = fmt.Fprintf(&buf, " LIMIT %d", s.Limit)
	}
	if s.Offset > 0 {
		_, _ = buf.WriteString(" OFFSET ")
		_, _ = buf.WriteString(strconv.Itoa(s.Offset))
	}
	return buf.String()
}

// RequiredPrivileges returns the privilege required to execute a ShowTagKeyCardinalityStatement.
func (s *ShowTagKeyCardinalityStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	// Privileges are determined by the sources being read.
	return s.Sources.RequiredPrivileges()
}

// DefaultDatabase returns the default database from the statement.
func (s *ShowTagKeyCardinalityStatement) DefaultDatabase() string {
	return s.Database
}

// ShowTagValuesStatement represents a command for listing tag values.
type ShowTagValuesStatement struct {
	// Database to query. If blank, use the default database.
	// The database can also be specified per source in the Sources.
	Database string

	// Data source that fields are extracted from.
	Sources Sources

	// Operation to use when selecting tag key(s).
	Op Token

	// Literal to compare the tag key(s) with.
	TagKeyExpr Literal

	// An expression evaluated on data point.
	Condition Expr

	// Fields to sort results by.
	SortFields SortFields

	// Maximum number of rows to be returned.
	// Unlimited if zero.
	Limit int

	// Returns rows starting at an offset from the first row.
	Offset int
}

// String returns a string representation of the statement.
func (s *ShowTagValuesStatement) String() string {
	var buf bytes.Buffer
	_, _ = buf.WriteString("SHOW TAG VALUES")

	if s.Database != "" {
		_, _ = buf.WriteString(" ON ")
		_, _ = buf.WriteString(QuoteIdent(s.Database))
	}
	if s.Sources != nil {
		_, _ = buf.WriteString(" FROM ")
		_, _ = buf.WriteString(s.Sources.String())
	}
	_, _ = buf.WriteString(" WITH KEY ")
	_, _ = buf.WriteString(s.Op.String())
	_, _ = buf.WriteString(" ")
	// A plain string tag key is rendered as an identifier; other literal
	// forms (regex, list) use their own String representation.
	if lit, ok := s.TagKeyExpr.(*StringLiteral); ok {
		_, _ = buf.WriteString(QuoteIdent(lit.Val))
	} else {
		_, _ = buf.WriteString(s.TagKeyExpr.String())
	}
	if s.Condition != nil {
		_, _ = buf.WriteString(" WHERE ")
		_, _ = buf.WriteString(s.Condition.String())
	}
	if len(s.SortFields) > 0 {
		_, _ = buf.WriteString(" ORDER BY ")
		_, _ = buf.WriteString(s.SortFields.String())
	}
	if s.Limit > 0 {
		_, _ = buf.WriteString(" LIMIT ")
		_, _ = buf.WriteString(strconv.Itoa(s.Limit))
	}
	if s.Offset > 0 {
		_, _ = buf.WriteString(" OFFSET ")
		_, _ = buf.WriteString(strconv.Itoa(s.Offset))
	}
	return buf.String()
}

// RequiredPrivileges returns the privilege(s) required to execute a ShowTagValuesStatement.
func (s *ShowTagValuesStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	return ExecutionPrivileges{{Admin: false, Name: s.Database, Privilege: ReadPrivilege}}, nil
}

// DefaultDatabase returns the default database from the statement.
func (s *ShowTagValuesStatement) DefaultDatabase() string {
	return s.Database
}

// ShowTagValuesCardinalityStatement represents a command for listing tag value cardinality.
type ShowTagValuesCardinalityStatement struct {
	Database      string
	Exact         bool
	Sources       Sources
	Op            Token
	TagKeyExpr    Literal
	Condition     Expr
	Dimensions    Dimensions
	Limit, Offset int
}

// String returns a string representation of the statement.
func (s *ShowTagValuesCardinalityStatement) String() string {
	var buf bytes.Buffer
	_, _ = buf.WriteString("SHOW TAG VALUES ")
	if s.Exact {
		_, _ = buf.WriteString("EXACT ")
	}
	_, _ = buf.WriteString("CARDINALITY")

	if s.Database != "" {
		_, _ = buf.WriteString(" ON ")
		_, _ = buf.WriteString(QuoteIdent(s.Database))
	}
	if s.Sources != nil {
		_, _ = buf.WriteString(" FROM ")
		_, _ = buf.WriteString(s.Sources.String())
	}
	_, _ = buf.WriteString(" WITH KEY ")
	_, _ = buf.WriteString(s.Op.String())
	_, _ = buf.WriteString(" ")
	// A plain string tag key is rendered as an identifier; other literal
	// forms (regex, list) use their own String representation.
	if lit, ok := s.TagKeyExpr.(*StringLiteral); ok {
		_, _ = buf.WriteString(QuoteIdent(lit.Val))
	} else {
		_, _ = buf.WriteString(s.TagKeyExpr.String())
	}
	if s.Condition != nil {
		_, _ = buf.WriteString(" WHERE ")
		_, _ = buf.WriteString(s.Condition.String())
	}
	if len(s.Dimensions) > 0 {
		_, _ = buf.WriteString(" GROUP BY ")
		_, _ = buf.WriteString(s.Dimensions.String())
	}
	if s.Limit > 0 {
		_, _ = fmt.Fprintf(&buf, " LIMIT %d", s.Limit)
	}
	if s.Offset > 0 {
		_, _ = buf.WriteString(" OFFSET ")
		_, _ = buf.WriteString(strconv.Itoa(s.Offset))
	}
	return buf.String()
}

// RequiredPrivileges returns the privilege required to execute a ShowTagValuesCardinalityStatement.
func (s *ShowTagValuesCardinalityStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	// Privileges are determined by the sources being read.
	return s.Sources.RequiredPrivileges()
}

// DefaultDatabase returns the default database from the statement.
func (s *ShowTagValuesCardinalityStatement) DefaultDatabase() string {
	return s.Database
}

// ShowUsersStatement represents a command for listing users.
type ShowUsersStatement struct{}

// String returns a string representation of the ShowUsersStatement.
func (s *ShowUsersStatement) String() string {
	return "SHOW USERS"
}

// RequiredPrivileges returns the privilege(s) required to execute a ShowUsersStatement.
func (s *ShowUsersStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	return ExecutionPrivileges{{Admin: true, Name: "", Privilege: AllPrivileges}}, nil
}

// ShowFieldKeyCardinalityStatement represents a command for listing field key cardinality.
type ShowFieldKeyCardinalityStatement struct {
	Database      string
	Exact         bool
	Sources       Sources
	Condition     Expr
	Dimensions    Dimensions
	Limit, Offset int
}

// String returns a string representation of the statement.
func (s *ShowFieldKeyCardinalityStatement) String() string {
	var buf bytes.Buffer
	_, _ = buf.WriteString("SHOW FIELD KEY ")
	if s.Exact {
		_, _ = buf.WriteString("EXACT ")
	}
	_, _ = buf.WriteString("CARDINALITY")

	if s.Database != "" {
		_, _ = buf.WriteString(" ON ")
		_, _ = buf.WriteString(QuoteIdent(s.Database))
	}
	if s.Sources != nil {
		_, _ = buf.WriteString(" FROM ")
		_, _ = buf.WriteString(s.Sources.String())
	}
	if s.Condition != nil {
		_, _ = buf.WriteString(" WHERE ")
		_, _ = buf.WriteString(s.Condition.String())
	}
	if len(s.Dimensions) > 0 {
		_, _ = buf.WriteString(" GROUP BY ")
		_, _ = buf.WriteString(s.Dimensions.String())
	}
	if s.Limit > 0 {
		_, _ = fmt.Fprintf(&buf, " LIMIT %d", s.Limit)
	}
	if s.Offset > 0 {
		_, _ = buf.WriteString(" OFFSET ")
		_, _ = buf.WriteString(strconv.Itoa(s.Offset))
	}
	return buf.String()
}

// RequiredPrivileges returns the privilege required to execute a ShowFieldKeyCardinalityStatement.
func (s *ShowFieldKeyCardinalityStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	// Privileges are determined by the sources being read.
	return s.Sources.RequiredPrivileges()
}

// DefaultDatabase returns the default database from the statement.
func (s *ShowFieldKeyCardinalityStatement) DefaultDatabase() string {
	return s.Database
}

// ShowFieldKeysStatement represents a command for listing field keys.
type ShowFieldKeysStatement struct {
	// Database to query. If blank, use the default database.
	// The database can also be specified per source in the Sources.
	Database string

	// Data sources that fields are extracted from.
	Sources Sources

	// Fields to sort results by.
	SortFields SortFields

	// Maximum number of rows to be returned.
	// Unlimited if zero.
	Limit int

	// Returns rows starting at an offset from the first row.
	Offset int
}

// String returns a string representation of the statement.
func (s *ShowFieldKeysStatement) String() string {
	var buf bytes.Buffer
	_, _ = buf.WriteString("SHOW FIELD KEYS")

	if s.Database != "" {
		_, _ = buf.WriteString(" ON ")
		_, _ = buf.WriteString(QuoteIdent(s.Database))
	}
	if s.Sources != nil {
		_, _ = buf.WriteString(" FROM ")
		_, _ = buf.WriteString(s.Sources.String())
	}
	if len(s.SortFields) > 0 {
		_, _ = buf.WriteString(" ORDER BY ")
		_, _ = buf.WriteString(s.SortFields.String())
	}
	if s.Limit > 0 {
		_, _ = buf.WriteString(" LIMIT ")
		_, _ = buf.WriteString(strconv.Itoa(s.Limit))
	}
	if s.Offset > 0 {
		_, _ = buf.WriteString(" OFFSET ")
		_, _ = buf.WriteString(strconv.Itoa(s.Offset))
	}
	return buf.String()
}

// RequiredPrivileges returns the privilege(s) required to execute a ShowFieldKeysStatement.
func (s *ShowFieldKeysStatement) RequiredPrivileges() (ExecutionPrivileges, error) {
	return ExecutionPrivileges{{Admin: false, Name: s.Database, Privilege: ReadPrivilege}}, nil
}

// DefaultDatabase returns the default database from the statement.
func (s *ShowFieldKeysStatement) DefaultDatabase() string {
	return s.Database
}

// Fields represents a list of fields.
type Fields []*Field

// AliasNames returns a list of calculated field names in
// order of alias, function name, then field.
func (a Fields) AliasNames() []string {
	names := []string{}
	for _, f := range a {
		names = append(names, f.Name())
	}
	return names
}

// Names returns a list of field names.
func (a Fields) Names() []string {
	names := []string{}
	for _, f := range a {
		switch expr := f.Expr.(type) {
		case *Call:
			names = append(names, expr.Name)
		case *VarRef:
			names = append(names, expr.Val)
		case *BinaryExpr:
			// Compound expressions contribute every variable/call name
			// found inside them.
			names = append(names, walkNames(expr)...)
		case *ParenExpr:
			names = append(names, walkNames(expr)...)
		}
	}
	return names
}

// String returns a string representation of the fields.
func (a Fields) String() string {
	var str []string
	for _, f := range a {
		str = append(str, f.String())
	}
	return strings.Join(str, ", ")
}

// Field represents an expression retrieved from a select statement.
type Field struct {
	Expr  Expr
	Alias string
}

// Name returns the name of the field. Returns alias, if set.
// Otherwise uses the function name or variable name.
func (f *Field) Name() string {
	// Return alias, if set.
	if f.Alias != "" {
		return f.Alias
	}

	// Return the function name or variable name, if available.
	switch expr := f.Expr.(type) {
	case *Call:
		return expr.Name
	case *BinaryExpr:
		return BinaryExprName(expr)
	case *ParenExpr:
		// Recurse through the parentheses to name the inner expression.
		f := Field{Expr: expr.Expr}
		return f.Name()
	case *VarRef:
		return expr.Val
	}

	// Otherwise return a blank name.
	return ""
}

// String returns a string representation of the field.
func (f *Field) String() string {
	str := f.Expr.String()

	if f.Alias == "" {
		return str
	}
	return fmt.Sprintf("%s AS %s", str, QuoteIdent(f.Alias))
}

// Len implements sort.Interface.
func (a Fields) Len() int { return len(a) }

// Less implements sort.Interface.
func (a Fields) Less(i, j int) bool { return a[i].Name() < a[j].Name() }

// Swap implements sort.Interface.
func (a Fields) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// Dimensions represents a list of dimensions.
type Dimensions []*Dimension

// String returns a string representation of the dimensions.
func (a Dimensions) String() string {
	var str []string
	for _, d := range a {
		str = append(str, d.String())
	}
	return strings.Join(str, ", ")
}

// Normalize returns the interval and tag dimensions separately.
// Returns 0 if no time interval is specified. func (a Dimensions) Normalize() (time.Duration, []string) { var dur time.Duration var tags []string for _, dim := range a { switch expr := dim.Expr.(type) { case *Call: lit, _ := expr.Args[0].(*DurationLiteral) dur = lit.Val case *VarRef: tags = append(tags, expr.Val) } } return dur, tags } // Dimension represents an expression that a select statement is grouped by. type Dimension struct { Expr Expr } // String returns a string representation of the dimension. func (d *Dimension) String() string { return d.Expr.String() } // Measurements represents a list of measurements. type Measurements []*Measurement // String returns a string representation of the measurements. func (a Measurements) String() string { var str []string for _, m := range a { str = append(str, m.String()) } return strings.Join(str, ", ") } // Measurement represents a single measurement used as a datasource. type Measurement struct { Database string RetentionPolicy string Name string Regex *RegexLiteral IsTarget bool // This field indicates that the measurement should read be read from the // specified system iterator. SystemIterator string } // Clone returns a deep clone of the Measurement. func (m *Measurement) Clone() *Measurement { var regexp *RegexLiteral if m.Regex != nil && m.Regex.Val != nil { regexp = &RegexLiteral{Val: m.Regex.Val.Copy()} } return &Measurement{ Database: m.Database, RetentionPolicy: m.RetentionPolicy, Name: m.Name, Regex: regexp, IsTarget: m.IsTarget, SystemIterator: m.SystemIterator, } } // String returns a string representation of the measurement. 
func (m *Measurement) String() string { var buf bytes.Buffer if m.Database != "" { _, _ = buf.WriteString(QuoteIdent(m.Database)) _, _ = buf.WriteString(".") } if m.RetentionPolicy != "" { _, _ = buf.WriteString(QuoteIdent(m.RetentionPolicy)) } if m.Database != "" || m.RetentionPolicy != "" { _, _ = buf.WriteString(`.`) } if m.Name != "" && m.SystemIterator == "" { _, _ = buf.WriteString(QuoteIdent(m.Name)) } else if m.SystemIterator != "" { _, _ = buf.WriteString(QuoteIdent(m.SystemIterator)) } else if m.Regex != nil { _, _ = buf.WriteString(m.Regex.String()) } return buf.String() } // SubQuery is a source with a SelectStatement as the backing store. type SubQuery struct { Statement *SelectStatement } // String returns a string representation of the subquery. func (s *SubQuery) String() string { return fmt.Sprintf("(%s)", s.Statement.String()) } // VarRef represents a reference to a variable. type VarRef struct { Val string Type DataType } // String returns a string representation of the variable reference. func (r *VarRef) String() string { buf := bytes.NewBufferString(QuoteIdent(r.Val)) if r.Type != Unknown { buf.WriteString("::") buf.WriteString(r.Type.String()) } return buf.String() } // VarRefs represents a slice of VarRef types. type VarRefs []VarRef // Len implements sort.Interface. func (a VarRefs) Len() int { return len(a) } // Less implements sort.Interface. func (a VarRefs) Less(i, j int) bool { if a[i].Val != a[j].Val { return a[i].Val < a[j].Val } return a[i].Type < a[j].Type } // Swap implements sort.Interface. func (a VarRefs) Swap(i, j int) { a[i], a[j] = a[j], a[i] } // Strings returns a slice of the variable names. func (a VarRefs) Strings() []string { s := make([]string, len(a)) for i, ref := range a { s[i] = ref.Val } return s } // Call represents a function call. type Call struct { Name string Args []Expr } // String returns a string representation of the call. func (c *Call) String() string { // Join arguments. 
var str []string for _, arg := range c.Args { str = append(str, arg.String()) } // Write function name and args. return fmt.Sprintf("%s(%s)", c.Name, strings.Join(str, ", ")) } // Distinct represents a DISTINCT expression. type Distinct struct { // Identifier following DISTINCT Val string } // String returns a string representation of the expression. func (d *Distinct) String() string { return fmt.Sprintf("DISTINCT %s", d.Val) } // NewCall returns a new call expression from this expressions. func (d *Distinct) NewCall() *Call { return &Call{ Name: "distinct", Args: []Expr{ &VarRef{Val: d.Val}, }, } } // NumberLiteral represents a numeric literal. type NumberLiteral struct { Val float64 } // String returns a string representation of the literal. func (l *NumberLiteral) String() string { return strconv.FormatFloat(l.Val, 'f', 3, 64) } // IntegerLiteral represents an integer literal. type IntegerLiteral struct { Val int64 } // String returns a string representation of the literal. func (l *IntegerLiteral) String() string { return fmt.Sprintf("%d", l.Val) } // UnsignedLiteral represents an unsigned literal. The parser will only use an unsigned literal if the parsed // integer is greater than math.MaxInt64. type UnsignedLiteral struct { Val uint64 } // String returns a string representation of the literal. func (l *UnsignedLiteral) String() string { return strconv.FormatUint(l.Val, 10) } // BooleanLiteral represents a boolean literal. type BooleanLiteral struct { Val bool } // String returns a string representation of the literal. func (l *BooleanLiteral) String() string { if l.Val { return "true" } return "false" } // isTrueLiteral returns true if the expression is a literal "true" value. func isTrueLiteral(expr Expr) bool { if expr, ok := expr.(*BooleanLiteral); ok { return expr.Val == true } return false } // isFalseLiteral returns true if the expression is a literal "false" value. 
func isFalseLiteral(expr Expr) bool { if expr, ok := expr.(*BooleanLiteral); ok { return expr.Val == false } return false } // ListLiteral represents a list of tag key literals. type ListLiteral struct { Vals []string } // String returns a string representation of the literal. func (s *ListLiteral) String() string { var buf bytes.Buffer _, _ = buf.WriteString("(") for idx, tagKey := range s.Vals { if idx != 0 { _, _ = buf.WriteString(", ") } _, _ = buf.WriteString(QuoteIdent(tagKey)) } _, _ = buf.WriteString(")") return buf.String() } // StringLiteral represents a string literal. type StringLiteral struct { Val string } // String returns a string representation of the literal. func (l *StringLiteral) String() string { return QuoteString(l.Val) } // IsTimeLiteral returns if this string can be interpreted as a time literal. func (l *StringLiteral) IsTimeLiteral() bool { return isDateTimeString(l.Val) || isDateString(l.Val) } // ToTimeLiteral returns a time literal if this string can be converted to a time literal. func (l *StringLiteral) ToTimeLiteral(loc *time.Location) (*TimeLiteral, error) { if loc == nil { loc = time.UTC } if isDateTimeString(l.Val) { t, err := time.ParseInLocation(DateTimeFormat, l.Val, loc) if err != nil { // try to parse it as an RFCNano time t, err = time.ParseInLocation(time.RFC3339Nano, l.Val, loc) if err != nil { return nil, ErrInvalidTime } } return &TimeLiteral{Val: t}, nil } else if isDateString(l.Val) { t, err := time.ParseInLocation(DateFormat, l.Val, loc) if err != nil { return nil, ErrInvalidTime } return &TimeLiteral{Val: t}, nil } return nil, ErrInvalidTime } // TimeLiteral represents a point-in-time literal. type TimeLiteral struct { Val time.Time } // String returns a string representation of the literal. func (l *TimeLiteral) String() string { return `'` + l.Val.UTC().Format(time.RFC3339Nano) + `'` } // DurationLiteral represents a duration literal. 
type DurationLiteral struct {
	Val time.Duration
}

// String returns a string representation of the literal.
func (l *DurationLiteral) String() string { return FormatDuration(l.Val) }

// NilLiteral represents a nil literal.
// This is not available to the query language itself. It's only used internally.
type NilLiteral struct{}

// String returns a string representation of the literal.
func (l *NilLiteral) String() string { return `nil` }

// BoundParameter represents a bound parameter literal.
// This is not available to the query language itself, but can be used when
// constructing a query string from an AST.
type BoundParameter struct {
	Name string
}

// String returns a string representation of the bound parameter.
func (bp *BoundParameter) String() string {
	return fmt.Sprintf("$%s", QuoteIdent(bp.Name))
}

// BinaryExpr represents an operation between two expressions.
type BinaryExpr struct {
	Op  Token
	LHS Expr
	RHS Expr
}

// String returns a string representation of the binary expression.
func (e *BinaryExpr) String() string {
	return fmt.Sprintf("%s %s %s", e.LHS.String(), e.Op.String(), e.RHS.String())
}

// BinaryExprName returns the name of a binary expression by concatenating
// the variables in the binary expression with underscores.
func BinaryExprName(expr *BinaryExpr) string {
	v := binaryExprNameVisitor{}
	Walk(&v, expr)
	return strings.Join(v.names, "_")
}

// binaryExprNameVisitor collects variable and call names while walking
// a binary expression tree.
type binaryExprNameVisitor struct {
	names []string
}

// Visit records VarRef and Call names; it does not descend into a Call's
// arguments (returns nil to stop the walk there).
func (v *binaryExprNameVisitor) Visit(n Node) Visitor {
	switch n := n.(type) {
	case *VarRef:
		v.names = append(v.names, n.Val)
	case *Call:
		v.names = append(v.names, n.Name)
		return nil
	}
	return v
}

// ParenExpr represents a parenthesized expression.
type ParenExpr struct {
	Expr Expr
}

// String returns a string representation of the parenthesized expression.
func (e *ParenExpr) String() string { return fmt.Sprintf("(%s)", e.Expr.String()) }

// RegexLiteral represents a regular expression.
type RegexLiteral struct { Val *regexp.Regexp } // String returns a string representation of the literal. func (r *RegexLiteral) String() string { if r.Val != nil { return fmt.Sprintf("/%s/", strings.Replace(r.Val.String(), `/`, `\/`, -1)) } return "" } // CloneRegexLiteral returns a clone of the RegexLiteral. func CloneRegexLiteral(r *RegexLiteral) *RegexLiteral { if r == nil { return nil } clone := &RegexLiteral{} if r.Val != nil { clone.Val = regexp.MustCompile(r.Val.String()) } return clone } // Wildcard represents a wild card expression. type Wildcard struct { Type Token } // String returns a string representation of the wildcard. func (e *Wildcard) String() string { switch e.Type { case FIELD: return "*::field" case TAG: return "*::tag" default: return "*" } } // CloneExpr returns a deep copy of the expression. func CloneExpr(expr Expr) Expr { if expr == nil { return nil } switch expr := expr.(type) { case *BinaryExpr: return &BinaryExpr{Op: expr.Op, LHS: CloneExpr(expr.LHS), RHS: CloneExpr(expr.RHS)} case *BooleanLiteral: return &BooleanLiteral{Val: expr.Val} case *Call: args := make([]Expr, len(expr.Args)) for i, arg := range expr.Args { args[i] = CloneExpr(arg) } return &Call{Name: expr.Name, Args: args} case *Distinct: return &Distinct{Val: expr.Val} case *DurationLiteral: return &DurationLiteral{Val: expr.Val} case *IntegerLiteral: return &IntegerLiteral{Val: expr.Val} case *UnsignedLiteral: return &UnsignedLiteral{Val: expr.Val} case *NumberLiteral: return &NumberLiteral{Val: expr.Val} case *ParenExpr: return &ParenExpr{Expr: CloneExpr(expr.Expr)} case *RegexLiteral: return &RegexLiteral{Val: expr.Val} case *StringLiteral: return &StringLiteral{Val: expr.Val} case *TimeLiteral: return &TimeLiteral{Val: expr.Val} case *VarRef: return &VarRef{Val: expr.Val, Type: expr.Type} case *Wildcard: return &Wildcard{Type: expr.Type} } panic("unreachable") } // HasTimeExpr returns true if the expression has a time term. 
func HasTimeExpr(expr Expr) bool {
	switch n := expr.(type) {
	case *BinaryExpr:
		// For logical operators, either side may contain the time term.
		if n.Op == AND || n.Op == OR {
			return HasTimeExpr(n.LHS) || HasTimeExpr(n.RHS)
		}
		// Otherwise only a comparison whose LHS is the "time" variable counts.
		if ref, ok := n.LHS.(*VarRef); ok && strings.ToLower(ref.Val) == "time" {
			return true
		}
		return false
	case *ParenExpr:
		// walk down the tree
		return HasTimeExpr(n.Expr)
	default:
		return false
	}
}

// Visitor can be called by Walk to traverse an AST hierarchy.
// The Visit() function is called once per node.
type Visitor interface {
	Visit(Node) Visitor
}

// Walk traverses a node hierarchy in depth-first order.
// Returning nil from Visit prunes traversal below that node.
func Walk(v Visitor, node Node) {
	if node == nil {
		return
	}

	if v = v.Visit(node); v == nil {
		return
	}

	// Descend into the children of each composite node type.
	switch n := node.(type) {
	case *BinaryExpr:
		Walk(v, n.LHS)
		Walk(v, n.RHS)

	case *Call:
		for _, expr := range n.Args {
			Walk(v, expr)
		}

	case *CreateContinuousQueryStatement:
		Walk(v, n.Source)

	case *Dimension:
		Walk(v, n.Expr)

	case Dimensions:
		for _, c := range n {
			Walk(v, c)
		}

	case *DeleteSeriesStatement:
		Walk(v, n.Sources)
		Walk(v, n.Condition)

	case *DropSeriesStatement:
		Walk(v, n.Sources)
		Walk(v, n.Condition)

	case *ExplainStatement:
		Walk(v, n.Statement)

	case *Field:
		Walk(v, n.Expr)

	case Fields:
		for _, c := range n {
			Walk(v, c)
		}

	case *ParenExpr:
		Walk(v, n.Expr)

	case *Query:
		Walk(v, n.Statements)

	case *SelectStatement:
		Walk(v, n.Fields)
		Walk(v, n.Target)
		Walk(v, n.Dimensions)
		Walk(v, n.Sources)
		Walk(v, n.Condition)
		Walk(v, n.SortFields)

	case *ShowFieldKeyCardinalityStatement:
		Walk(v, n.Sources)
		Walk(v, n.Condition)

	case *ShowSeriesStatement:
		Walk(v, n.Sources)
		Walk(v, n.Condition)

	case *ShowSeriesCardinalityStatement:
		Walk(v, n.Sources)
		Walk(v, n.Condition)

	case *ShowMeasurementCardinalityStatement:
		Walk(v, n.Sources)
		Walk(v, n.Condition)

	case *ShowTagKeyCardinalityStatement:
		Walk(v, n.Sources)
		Walk(v, n.Condition)

	case *ShowTagKeysStatement:
		Walk(v, n.Sources)
		Walk(v, n.Condition)
		Walk(v, n.SortFields)

	case *ShowTagValuesCardinalityStatement:
		Walk(v, n.Sources)
		Walk(v, n.Condition)

	case *ShowTagValuesStatement:
		Walk(v, n.Sources)
		Walk(v, n.Condition)
		Walk(v, n.SortFields)

	case *ShowFieldKeysStatement:
		Walk(v, n.Sources)
		Walk(v, n.SortFields)

	case SortFields:
		for _, sf := range n {
			Walk(v, sf)
		}

	case Sources:
		for _, s := range n {
			Walk(v, s)
		}

	case *SubQuery:
		Walk(v, n.Statement)

	case Statements:
		for _, s := range n {
			Walk(v, s)
		}

	case *Target:
		if n != nil {
			Walk(v, n.Measurement)
		}
	}
}

// WalkFunc traverses a node hierarchy in depth-first order.
func WalkFunc(node Node, fn func(Node)) {
	Walk(walkFuncVisitor(fn), node)
}

// walkFuncVisitor adapts a plain function to the Visitor interface.
type walkFuncVisitor func(Node)

func (fn walkFuncVisitor) Visit(n Node) Visitor { fn(n); return fn }

// Rewriter can be called by Rewrite to replace nodes in the AST hierarchy.
// The Rewrite() function is called once per node.
type Rewriter interface {
	Rewrite(Node) Node
}

// Rewrite recursively invokes the rewriter to replace each node.
// Nodes are traversed depth-first and rewritten from leaf to root.
func Rewrite(r Rewriter, node Node) Node {
	switch n := node.(type) {
	case *Query:
		n.Statements = Rewrite(r, n.Statements).(Statements)

	case Statements:
		for i, s := range n {
			n[i] = Rewrite(r, s).(Statement)
		}

	case *SelectStatement:
		n.Fields = Rewrite(r, n.Fields).(Fields)
		n.Dimensions = Rewrite(r, n.Dimensions).(Dimensions)
		n.Sources = Rewrite(r, n.Sources).(Sources)

		// Rewrite may return nil. Nil does not satisfy the Expr
		// interface. We only assert the rewritten result to be an
		// Expr if it is not nil:
		if cond := Rewrite(r, n.Condition); cond != nil {
			n.Condition = cond.(Expr)
		} else {
			n.Condition = nil
		}

	case *SubQuery:
		n.Statement = Rewrite(r, n.Statement).(*SelectStatement)

	case Fields:
		for i, f := range n {
			n[i] = Rewrite(r, f).(*Field)
		}

	case *Field:
		n.Expr = Rewrite(r, n.Expr).(Expr)

	case Dimensions:
		for i, d := range n {
			n[i] = Rewrite(r, d).(*Dimension)
		}

	case *Dimension:
		n.Expr = Rewrite(r, n.Expr).(Expr)

	case *BinaryExpr:
		n.LHS = Rewrite(r, n.LHS).(Expr)
		n.RHS = Rewrite(r, n.RHS).(Expr)

	case *ParenExpr:
		n.Expr = Rewrite(r, n.Expr).(Expr)

	case *Call:
		for i, expr := range n.Args {
			n.Args[i] = Rewrite(r, expr).(Expr)
		}
	}

	// Children have been rewritten; now rewrite this node itself.
	return r.Rewrite(node)
}

// RewriteFunc rewrites a node hierarchy.
func RewriteFunc(node Node, fn func(Node) Node) Node {
	return Rewrite(rewriterFunc(fn), node)
}

// rewriterFunc adapts a plain function to the Rewriter interface.
type rewriterFunc func(Node) Node

func (fn rewriterFunc) Rewrite(n Node) Node { return fn(n) }

// RewriteExpr recursively invokes the function to replace each expr.
// Nodes are traversed depth-first and rewritten from leaf to root.
func RewriteExpr(expr Expr, fn func(Expr) Expr) Expr {
	switch e := expr.(type) {
	case *BinaryExpr:
		e.LHS = RewriteExpr(e.LHS, fn)
		e.RHS = RewriteExpr(e.RHS, fn)
		// If a side was removed (rewritten to nil), collapse the binary
		// expression to the surviving side, or drop it entirely.
		if e.LHS != nil && e.RHS == nil {
			expr = e.LHS
		} else if e.RHS != nil && e.LHS == nil {
			expr = e.RHS
		} else if e.LHS == nil && e.RHS == nil {
			return nil
		}

	case *ParenExpr:
		e.Expr = RewriteExpr(e.Expr, fn)
		if e.Expr == nil {
			return nil
		}

	case *Call:
		for i, expr := range e.Args {
			e.Args[i] = RewriteExpr(expr, fn)
		}
	}

	return fn(expr)
}

// Eval evaluates expr against a map.
func Eval(expr Expr, m map[string]interface{}) interface{} {
	eval := ValuerEval{Valuer: MapValuer(m)}
	return eval.Eval(expr)
}

// MapValuer is a valuer that substitutes values for the mapped interface.
type MapValuer map[string]interface{}

// Value returns the value for a key in the MapValuer.
func (m MapValuer) Value(key string) (interface{}, bool) {
	v, ok := m[key]
	return v, ok
}

// ValuerEval will evaluate an expression using the Valuer.
type ValuerEval struct {
	Valuer Valuer

	// IntegerFloatDivision will set the eval system to treat
	// a division between two integers as a floating point division.
	IntegerFloatDivision bool
}

// Eval evaluates an expression and returns a value.
func (v *ValuerEval) Eval(expr Expr) interface{} {
	if expr == nil {
		return nil
	}

	switch expr := expr.(type) {
	case *BinaryExpr:
		return v.evalBinaryExpr(expr)
	case *BooleanLiteral:
		return expr.Val
	case *IntegerLiteral:
		return expr.Val
	case *NumberLiteral:
		return expr.Val
	case *UnsignedLiteral:
		return expr.Val
	case *ParenExpr:
		return v.Eval(expr.Expr)
	case *RegexLiteral:
		return expr.Val
	case *StringLiteral:
		return expr.Val
	case *Call:
		// Calls can only be evaluated if the Valuer also implements CallValuer.
		if valuer, ok := v.Valuer.(CallValuer); ok {
			var args []interface{}
			if len(expr.Args) > 0 {
				args = make([]interface{}, len(expr.Args))
				for i := range expr.Args {
					args[i] = v.Eval(expr.Args[i])
				}
			}
			val, _ := valuer.Call(expr.Name, args)
			return val
		}
		return nil
	case *VarRef:
		val, _ := v.Valuer.Value(expr.Val)
		return val
	default:
		return nil
	}
}

// EvalBool evaluates expr and returns true if result is a boolean true.
// Otherwise returns false.
func (v *ValuerEval) EvalBool(expr Expr) bool {
	val, _ := v.Eval(expr).(bool)
	return val
}

// evalBinaryExpr evaluates both operands and applies the operator, handling
// the numeric cross-type combinations (float64/int64/uint64) explicitly.
func (v *ValuerEval) evalBinaryExpr(expr *BinaryExpr) interface{} {
	lhs := v.Eval(expr.LHS)
	rhs := v.Eval(expr.RHS)
	if lhs == nil && rhs != nil {
		// When the LHS is nil and the RHS is a boolean, implicitly cast the
		// nil to false.
		if _, ok := rhs.(bool); ok {
			lhs = false
		}
	} else if lhs != nil && rhs == nil {
		// Implicit cast of the RHS nil to false when the LHS is a boolean.
		if _, ok := lhs.(bool); ok {
			rhs = false
		}
	}

	// Evaluate if both sides are simple types.
	switch lhs := lhs.(type) {
	case bool:
		rhs, ok := rhs.(bool)
		switch expr.Op {
		case AND:
			return ok && (lhs && rhs)
		case OR:
			return ok && (lhs || rhs)
		case BITWISE_AND:
			return ok && (lhs && rhs)
		case BITWISE_OR:
			return ok && (lhs || rhs)
		case BITWISE_XOR:
			return ok && (lhs != rhs)
		case EQ:
			return ok && (lhs == rhs)
		case NEQ:
			return ok && (lhs != rhs)
		}
	case float64:
		// Try the rhs as a float64, int64, or uint64
		rhsf, ok := rhs.(float64)
		if !ok {
			switch val := rhs.(type) {
			case int64:
				rhsf, ok = float64(val), true
			case uint64:
				rhsf, ok = float64(val), true
			}
		}

		rhs := rhsf
		switch expr.Op {
		case EQ:
			return ok && (lhs == rhs)
		case NEQ:
			return ok && (lhs != rhs)
		case LT:
			return ok && (lhs < rhs)
		case LTE:
			return ok && (lhs <= rhs)
		case GT:
			return ok && (lhs > rhs)
		case GTE:
			return ok && (lhs >= rhs)
		case ADD:
			if !ok {
				return nil
			}
			return lhs + rhs
		case SUB:
			if !ok {
				return nil
			}
			return lhs - rhs
		case MUL:
			if !ok {
				return nil
			}
			return lhs * rhs
		case DIV:
			if !ok {
				return nil
			} else if rhs == 0 {
				// Division by zero yields 0 rather than +Inf/NaN.
				return float64(0)
			}
			return lhs / rhs
		case MOD:
			if !ok {
				return nil
			}
			return math.Mod(lhs, rhs)
		}
	case int64:
		// Try as a float64 to see if a float cast is required.
		switch rhs := rhs.(type) {
		case float64:
			lhs := float64(lhs)
			switch expr.Op {
			case EQ:
				return lhs == rhs
			case NEQ:
				return lhs != rhs
			case LT:
				return lhs < rhs
			case LTE:
				return lhs <= rhs
			case GT:
				return lhs > rhs
			case GTE:
				return lhs >= rhs
			case ADD:
				return lhs + rhs
			case SUB:
				return lhs - rhs
			case MUL:
				return lhs * rhs
			case DIV:
				if rhs == 0 {
					return float64(0)
				}
				return lhs / rhs
			case MOD:
				return math.Mod(lhs, rhs)
			}
		case int64:
			switch expr.Op {
			case EQ:
				return lhs == rhs
			case NEQ:
				return lhs != rhs
			case LT:
				return lhs < rhs
			case LTE:
				return lhs <= rhs
			case GT:
				return lhs > rhs
			case GTE:
				return lhs >= rhs
			case ADD:
				return lhs + rhs
			case SUB:
				return lhs - rhs
			case MUL:
				return lhs * rhs
			case DIV:
				// Optionally treat integer division as float division.
				if v.IntegerFloatDivision {
					if rhs == 0 {
						return float64(0)
					}
					return float64(lhs) / float64(rhs)
				}

				if rhs == 0 {
					return int64(0)
				}
				return lhs / rhs
			case MOD:
				if rhs == 0 {
					return int64(0)
				}
				return lhs % rhs
			case BITWISE_AND:
				return lhs & rhs
			case BITWISE_OR:
				return lhs | rhs
			case BITWISE_XOR:
				return lhs ^ rhs
			}
		case uint64:
			// Comparisons with a negative int64 are resolved before casting
			// to uint64, since the cast would wrap around.
			switch expr.Op {
			case EQ:
				return uint64(lhs) == rhs
			case NEQ:
				return uint64(lhs) != rhs
			case LT:
				if lhs < 0 {
					return true
				}
				return uint64(lhs) < rhs
			case LTE:
				if lhs < 0 {
					return true
				}
				return uint64(lhs) <= rhs
			case GT:
				if lhs < 0 {
					return false
				}
				return uint64(lhs) > rhs
			case GTE:
				if lhs < 0 {
					return false
				}
				return uint64(lhs) >= rhs
			case ADD:
				return uint64(lhs) + rhs
			case SUB:
				return uint64(lhs) - rhs
			case MUL:
				return uint64(lhs) * rhs
			case DIV:
				if rhs == 0 {
					return uint64(0)
				}
				return uint64(lhs) / rhs
			case MOD:
				if rhs == 0 {
					return uint64(0)
				}
				return uint64(lhs) % rhs
			case BITWISE_AND:
				return uint64(lhs) & rhs
			case BITWISE_OR:
				return uint64(lhs) | rhs
			case BITWISE_XOR:
				return uint64(lhs) ^ rhs
			}
		}
	case uint64:
		// Try as a float64 to see if a float cast is required.
		switch rhs := rhs.(type) {
		case float64:
			lhs := float64(lhs)
			switch expr.Op {
			case EQ:
				return lhs == rhs
			case NEQ:
				return lhs != rhs
			case LT:
				return lhs < rhs
			case LTE:
				return lhs <= rhs
			case GT:
				return lhs > rhs
			case GTE:
				return lhs >= rhs
			case ADD:
				return lhs + rhs
			case SUB:
				return lhs - rhs
			case MUL:
				return lhs * rhs
			case DIV:
				if rhs == 0 {
					return float64(0)
				}
				return lhs / rhs
			case MOD:
				return math.Mod(lhs, rhs)
			}
		case int64:
			// Mirror of the int64/uint64 case above: negative RHS values are
			// handled before casting to uint64.
			switch expr.Op {
			case EQ:
				return lhs == uint64(rhs)
			case NEQ:
				return lhs != uint64(rhs)
			case LT:
				if rhs < 0 {
					return false
				}
				return lhs < uint64(rhs)
			case LTE:
				if rhs < 0 {
					return false
				}
				return lhs <= uint64(rhs)
			case GT:
				if rhs < 0 {
					return true
				}
				return lhs > uint64(rhs)
			case GTE:
				if rhs < 0 {
					return true
				}
				return lhs >= uint64(rhs)
			case ADD:
				return lhs + uint64(rhs)
			case SUB:
				return lhs - uint64(rhs)
			case MUL:
				return lhs * uint64(rhs)
			case DIV:
				if rhs == 0 {
					return uint64(0)
				}
				return lhs / uint64(rhs)
			case MOD:
				if rhs == 0 {
					return uint64(0)
				}
				return lhs % uint64(rhs)
			case BITWISE_AND:
				return lhs & uint64(rhs)
			case BITWISE_OR:
				return lhs | uint64(rhs)
			case BITWISE_XOR:
				return lhs ^ uint64(rhs)
			}
		case uint64:
			switch expr.Op {
			case EQ:
				return lhs == rhs
			case NEQ:
				return lhs != rhs
			case LT:
				return lhs < rhs
			case LTE:
				return lhs <= rhs
			case GT:
				return lhs > rhs
			case GTE:
				return lhs >= rhs
			case ADD:
				return lhs + rhs
			case SUB:
				return lhs - rhs
			case MUL:
				return lhs * rhs
			case DIV:
				if rhs == 0 {
					return uint64(0)
				}
				return lhs / rhs
			case MOD:
				if rhs == 0 {
					return uint64(0)
				}
				return lhs % rhs
			case BITWISE_AND:
				return lhs & rhs
			case BITWISE_OR:
				return lhs | rhs
			case BITWISE_XOR:
				return lhs ^ rhs
			}
		}
	case string:
		switch expr.Op {
		case EQ:
			rhs, ok := rhs.(string)
			if !ok {
				return false
			}
			return lhs == rhs
		case NEQ:
			rhs, ok := rhs.(string)
			if !ok {
				return false
			}
			return lhs != rhs
		case EQREGEX:
			rhs, ok := rhs.(*regexp.Regexp)
			if !ok {
				return false
			}
			return rhs.MatchString(lhs)
		case NEQREGEX:
			rhs, ok := rhs.(*regexp.Regexp)
			if !ok {
				return false
			}
			return !rhs.MatchString(lhs)
		}
	}

	// The types were not comparable. If our operation was an equality operation,
	// return false instead of true.
	switch expr.Op {
	case EQ, NEQ, LT, LTE, GT, GTE:
		return false
	}
	return nil
}

// EvalBool evaluates expr and returns true if result is a boolean true.
// Otherwise returns false.
func EvalBool(expr Expr, m map[string]interface{}) bool {
	v, _ := Eval(expr, m).(bool)
	return v
}

// TypeMapper maps a data type to the measurement and field.
type TypeMapper interface {
	MapType(measurement *Measurement, field string) DataType
}

// CallTypeMapper maps a data type to the function call.
type CallTypeMapper interface {
	TypeMapper

	CallType(name string, args []DataType) (DataType, error)
}

// nilTypeMapper is a TypeMapper that always reports Unknown.
type nilTypeMapper struct{}

func (nilTypeMapper) MapType(*Measurement, string) DataType { return Unknown }

type multiTypeMapper []TypeMapper

// MultiTypeMapper combines multiple TypeMappers into a single one.
// The MultiTypeMapper will return the first type that is not Unknown.
// It will not iterate through all of them to find the highest priority one.
func MultiTypeMapper(mappers ...TypeMapper) TypeMapper {
	return multiTypeMapper(mappers)
}

func (a multiTypeMapper) MapType(measurement *Measurement, field string) DataType {
	for _, m := range a {
		if typ := m.MapType(measurement, field); typ != Unknown {
			return typ
		}
	}
	return Unknown
}

func (a multiTypeMapper) CallType(name string, args []DataType) (DataType, error) {
	for _, m := range a {
		call, ok := m.(CallTypeMapper)
		if ok {
			typ, err := call.CallType(name, args)
			if err != nil {
				return Unknown, err
			} else if typ != Unknown {
				return typ, nil
			}
		}
	}
	return Unknown, nil
}

// TypeValuerEval evaluates an expression to determine its output type.
type TypeValuerEval struct {
	TypeMapper TypeMapper
	Sources    Sources
}

// EvalType returns the type for an expression. If the expression cannot
// be evaluated for some reason, like incompatible types, it is returned
// as a TypeError in the error.
// This function assumes that the expression has already been reduced.
func (v *TypeValuerEval) EvalType(expr Expr) (DataType, error) {
	switch expr := expr.(type) {
	case *VarRef:
		return v.evalVarRefExprType(expr)
	case *Call:
		return v.evalCallExprType(expr)
	case *BinaryExpr:
		return v.evalBinaryExprType(expr)
	case *ParenExpr:
		return v.EvalType(expr.Expr)
	case *NumberLiteral:
		return Float, nil
	case *IntegerLiteral:
		return Integer, nil
	case *UnsignedLiteral:
		return Unsigned, nil
	case *StringLiteral:
		return String, nil
	case *BooleanLiteral:
		return Boolean, nil
	}
	return Unknown, nil
}

// evalVarRefExprType resolves a variable reference's type from the sources,
// keeping the highest-priority type found (per DataType.LessThan).
func (v *TypeValuerEval) evalVarRefExprType(expr *VarRef) (DataType, error) {
	// If this variable already has an assigned type, just use that.
	if expr.Type != Unknown && expr.Type != AnyField {
		return expr.Type, nil
	}

	var typ DataType
	if v.TypeMapper != nil {
		for _, src := range v.Sources {
			switch src := src.(type) {
			case *Measurement:
				if t := v.TypeMapper.MapType(src, expr.Val); typ.LessThan(t) {
					typ = t
				}
			case *SubQuery:
				_, e := src.Statement.FieldExprByName(expr.Val)
				if e != nil {
					valuer := TypeValuerEval{
						TypeMapper: v.TypeMapper,
						Sources:    src.Statement.Sources,
					}

					if t, err := valuer.EvalType(e); err != nil {
						return Unknown, err
					} else if typ.LessThan(t) {
						typ = t
					}
				}

				// Fall back to a tag if the name matches a GROUP BY dimension.
				if typ == Unknown {
					for _, d := range src.Statement.Dimensions {
						if d, ok := d.Expr.(*VarRef); ok && expr.Val == d.Val {
							typ = Tag
						}
					}
				}
			}
		}
	}
	return typ, nil
}

// evalCallExprType determines the return type of a function call using a
// CallTypeMapper, if the configured TypeMapper supports it.
func (v *TypeValuerEval) evalCallExprType(expr *Call) (DataType, error) {
	typmap, ok := v.TypeMapper.(CallTypeMapper)
	if !ok {
		return Unknown, nil
	}

	// Evaluate all of the data types for the arguments.
	args := make([]DataType, len(expr.Args))
	for i, arg := range expr.Args {
		typ, err := v.EvalType(arg)
		if err != nil {
			return Unknown, err
		}
		args[i] = typ
	}

	// Pass in the data types for the call so it can be type checked and
	// the resulting type can be returned.
	return typmap.CallType(expr.Name, args)
}

// evalBinaryExprType determines the output type of a binary expression from
// the types of its operands.
func (v *TypeValuerEval) evalBinaryExprType(expr *BinaryExpr) (DataType, error) {
	// Find the data type for both sides of the expression.
	lhs, err := v.EvalType(expr.LHS)
	if err != nil {
		return Unknown, err
	}
	rhs, err := v.EvalType(expr.RHS)
	if err != nil {
		return Unknown, err
	}

	// If one of the two is unsigned and the other is an integer, we cannot add
	// the two without an explicit cast unless the integer is a literal.
	if lhs == Unsigned && rhs == Integer {
		if isLiteral(expr.LHS) {
			return Unknown, &TypeError{
				Expr:    expr,
				Message: fmt.Sprintf("cannot use %s with an integer and unsigned literal", expr.Op),
			}
		} else if !isLiteral(expr.RHS) {
			return Unknown, &TypeError{
				Expr:    expr,
				Message: fmt.Sprintf("cannot use %s between an integer and unsigned, an explicit cast is required", expr.Op),
			}
		}
	} else if lhs == Integer && rhs == Unsigned {
		if isLiteral(expr.RHS) {
			return Unknown, &TypeError{
				Expr:    expr,
				Message: fmt.Sprintf("cannot use %s with an integer and unsigned literal", expr.Op),
			}
		} else if !isLiteral(expr.LHS) {
			return Unknown, &TypeError{
				Expr:    expr,
				Message: fmt.Sprintf("cannot use %s between an integer and unsigned, an explicit cast is required", expr.Op),
			}
		}
	}

	// If one of the two is unknown, then return the other as the type.
	if lhs == Unknown {
		return rhs, nil
	} else if rhs == Unknown {
		return lhs, nil
	}

	// Rather than re-implement the ValuerEval here, we create a dummy binary
	// expression with the zero values and inspect the resulting value back into
	// a data type to determine the output.
	e := BinaryExpr{
		LHS: &VarRef{Val: "lhs"},
		RHS: &VarRef{Val: "rhs"},
		Op:  expr.Op,
	}
	result := Eval(&e, map[string]interface{}{
		"lhs": lhs.Zero(),
		"rhs": rhs.Zero(),
	})

	typ := InspectDataType(result)
	if typ == Unknown {
		// If the type is unknown, then the two types were not compatible.
		return Unknown, &TypeError{
			Expr:    expr,
			Message: fmt.Sprintf("incompatible types: %s and %s", lhs, rhs),
		}
	}
	return typ, nil
}

// TypeError is an error when two types are incompatible.
type TypeError struct {
	// Expr contains the expression that generated the type error.
	Expr Expr
	// Message contains the informational message about the type error.
	Message string
}

func (e *TypeError) Error() string {
	return fmt.Sprintf("type error: %s: %s", e.Expr, e.Message)
}

// EvalType evaluates the expression's type.
func EvalType(expr Expr, sources Sources, typmap TypeMapper) DataType {
	if typmap == nil {
		typmap = nilTypeMapper{}
	}

	valuer := TypeValuerEval{
		TypeMapper: typmap,
		Sources:    sources,
	}
	typ, _ := valuer.EvalType(expr)
	return typ
}

// FieldDimensions returns the combined fields and dimensions across all of
// the given sources, keeping the highest-priority type for duplicate fields.
func FieldDimensions(sources Sources, m FieldMapper) (fields map[string]DataType, dimensions map[string]struct{}, err error) {
	fields = make(map[string]DataType)
	dimensions = make(map[string]struct{})

	for _, src := range sources {
		switch src := src.(type) {
		case *Measurement:
			f, d, err := m.FieldDimensions(src)
			if err != nil {
				return nil, nil, err
			}

			for k, typ := range f {
				if fields[k].LessThan(typ) {
					fields[k] = typ
				}
			}
			for k := range d {
				dimensions[k] = struct{}{}
			}
		case *SubQuery:
			for _, f := range src.Statement.Fields {
				k := f.Name()
				typ := EvalType(f.Expr, src.Statement.Sources, m)

				if fields[k].LessThan(typ) {
					fields[k] = typ
				}
			}

			for _, d := range src.Statement.Dimensions {
				if expr, ok := d.Expr.(*VarRef); ok {
					dimensions[expr.Val] = struct{}{}
				}
			}
		}
	}
	return
}

// Reduce evaluates expr using the available values in valuer.
// References that don't exist in valuer are ignored.
func Reduce(expr Expr, valuer Valuer) Expr {
	expr = reduce(expr, valuer)

	// Unwrap parens at top level.
	if expr, ok := expr.(*ParenExpr); ok {
		return expr.Expr
	}
	return expr
}

// reduce recursively constant-folds an expression tree.
func reduce(expr Expr, valuer Valuer) Expr {
	if expr == nil {
		return nil
	}

	switch expr := expr.(type) {
	case *BinaryExpr:
		return reduceBinaryExpr(expr, valuer)
	case *Call:
		return reduceCall(expr, valuer)
	case *ParenExpr:
		return reduceParenExpr(expr, valuer)
	case *VarRef:
		return reduceVarRef(expr, valuer)
	case *NilLiteral:
		return expr
	default:
		return CloneExpr(expr)
	}
}

// reduceBinaryExpr folds a binary expression, dispatching on the type of the
// reduced left-hand side.
func reduceBinaryExpr(expr *BinaryExpr, valuer Valuer) Expr {
	// Reduce both sides first.
	op := expr.Op
	lhs := reduce(expr.LHS, valuer)
	rhs := reduce(expr.RHS, valuer)

	// Use the valuer's time zone, if it provides one, for time parsing.
	loc := time.UTC
	if valuer, ok := valuer.(ZoneValuer); ok {
		if l := valuer.Zone(); l != nil {
			loc = l
		}
	}

	// Do not evaluate if one side is nil.
	if lhs == nil || rhs == nil {
		return &BinaryExpr{LHS: lhs, RHS: rhs, Op: expr.Op}
	}

	// If we have a logical operator (AND, OR) and one side is a boolean literal
	// then we need to have special handling.
	if op == AND {
		if isFalseLiteral(lhs) || isFalseLiteral(rhs) {
			return &BooleanLiteral{Val: false}
		} else if isTrueLiteral(lhs) {
			return rhs
		} else if isTrueLiteral(rhs) {
			return lhs
		}
	} else if op == OR {
		if isTrueLiteral(lhs) || isTrueLiteral(rhs) {
			return &BooleanLiteral{Val: true}
		} else if isFalseLiteral(lhs) {
			return rhs
		} else if isFalseLiteral(rhs) {
			return lhs
		}
	}

	// Evaluate if both sides are simple types.
	switch lhs := lhs.(type) {
	case *BooleanLiteral:
		return reduceBinaryExprBooleanLHS(op, lhs, rhs)
	case *DurationLiteral:
		return reduceBinaryExprDurationLHS(op, lhs, rhs, loc)
	case *IntegerLiteral:
		return reduceBinaryExprIntegerLHS(op, lhs, rhs, loc)
	case *UnsignedLiteral:
		return reduceBinaryExprUnsignedLHS(op, lhs, rhs)
	case *NilLiteral:
		return reduceBinaryExprNilLHS(op, lhs, rhs)
	case *NumberLiteral:
		return reduceBinaryExprNumberLHS(op, lhs, rhs)
	case *StringLiteral:
		return reduceBinaryExprStringLHS(op, lhs, rhs, loc)
	case *TimeLiteral:
		return reduceBinaryExprTimeLHS(op, lhs, rhs, loc)
	default:
		return &BinaryExpr{Op: op, LHS: lhs, RHS: rhs}
	}
}

// reduceBinaryExprBooleanLHS folds expressions with a boolean literal LHS.
func reduceBinaryExprBooleanLHS(op Token, lhs *BooleanLiteral, rhs Expr) Expr {
	switch rhs := rhs.(type) {
	case *BooleanLiteral:
		switch op {
		case EQ:
			return &BooleanLiteral{Val: lhs.Val == rhs.Val}
		case NEQ:
			return &BooleanLiteral{Val: lhs.Val != rhs.Val}
		case AND:
			return &BooleanLiteral{Val: lhs.Val && rhs.Val}
		case OR:
			return &BooleanLiteral{Val: lhs.Val || rhs.Val}
		case BITWISE_AND:
			return &BooleanLiteral{Val: lhs.Val && rhs.Val}
		case BITWISE_OR:
			return &BooleanLiteral{Val: lhs.Val || rhs.Val}
		case BITWISE_XOR:
			return &BooleanLiteral{Val: lhs.Val != rhs.Val}
		}
	case *NilLiteral:
		return &BooleanLiteral{Val: false}
	}
	return &BinaryExpr{Op: op, LHS: lhs, RHS: rhs}
}

// reduceBinaryExprDurationLHS folds expressions with a duration literal LHS.
func reduceBinaryExprDurationLHS(op Token, lhs *DurationLiteral, rhs Expr, loc *time.Location) Expr {
	switch rhs := rhs.(type) {
	case *DurationLiteral:
		switch op {
		case ADD:
			return &DurationLiteral{Val: lhs.Val + rhs.Val}
		case SUB:
			return &DurationLiteral{Val: lhs.Val - rhs.Val}
		case EQ:
			return &BooleanLiteral{Val: lhs.Val == rhs.Val}
		case NEQ:
			return &BooleanLiteral{Val: lhs.Val != rhs.Val}
		case GT:
			return &BooleanLiteral{Val: lhs.Val > rhs.Val}
		case GTE:
			return &BooleanLiteral{Val: lhs.Val >= rhs.Val}
		case LT:
			return &BooleanLiteral{Val: lhs.Val < rhs.Val}
		case LTE:
			return &BooleanLiteral{Val: lhs.Val <= rhs.Val}
		}
	case *NumberLiteral:
		switch op {
		case MUL:
			return &DurationLiteral{Val: lhs.Val * time.Duration(rhs.Val)}
		case DIV:
			if rhs.Val == 0 {
				return &DurationLiteral{Val: 0}
			}
			return &DurationLiteral{Val: lhs.Val / time.Duration(rhs.Val)}
		}
	case *IntegerLiteral:
		switch op {
		case MUL:
			return &DurationLiteral{Val: lhs.Val * time.Duration(rhs.Val)}
		case DIV:
			if rhs.Val == 0 {
				return &DurationLiteral{Val: 0}
			}
			return &DurationLiteral{Val: lhs.Val / time.Duration(rhs.Val)}
		}
	case *TimeLiteral:
		switch op {
		case ADD:
			return &TimeLiteral{Val: rhs.Val.Add(lhs.Val)}
		}
	case *StringLiteral:
		t, err := rhs.ToTimeLiteral(loc)
		if err != nil {
			break
		}
		expr := reduceBinaryExprDurationLHS(op, lhs, t, loc)

		// If the returned expression is still a binary expr, that means
		// we couldn't reduce it so this wasn't used in a time literal context.
		if _, ok := expr.(*BinaryExpr); !ok {
			return expr
		}
	case *NilLiteral:
		return &BooleanLiteral{Val: false}
	}
	return &BinaryExpr{Op: op, LHS: lhs, RHS: rhs}
}

// reduceBinaryExprIntegerLHS folds expressions with an integer literal LHS.
func reduceBinaryExprIntegerLHS(op Token, lhs *IntegerLiteral, rhs Expr, loc *time.Location) Expr {
	switch rhs := rhs.(type) {
	case *NumberLiteral:
		return reduceBinaryExprNumberLHS(op, &NumberLiteral{Val: float64(lhs.Val)}, rhs)
	case *IntegerLiteral:
		switch op {
		case ADD:
			return &IntegerLiteral{Val: lhs.Val + rhs.Val}
		case SUB:
			return &IntegerLiteral{Val: lhs.Val - rhs.Val}
		case MUL:
			return &IntegerLiteral{Val: lhs.Val * rhs.Val}
		case DIV:
			// Integer division of literals produces a float result.
			if rhs.Val == 0 {
				return &NumberLiteral{Val: 0}
			}
			return &NumberLiteral{Val: float64(lhs.Val) / float64(rhs.Val)}
		case MOD:
			if rhs.Val == 0 {
				return &IntegerLiteral{Val: 0}
			}
			return &IntegerLiteral{Val: lhs.Val % rhs.Val}
		case BITWISE_AND:
			return &IntegerLiteral{Val: lhs.Val & rhs.Val}
		case BITWISE_OR:
			return &IntegerLiteral{Val: lhs.Val | rhs.Val}
		case BITWISE_XOR:
			return &IntegerLiteral{Val: lhs.Val ^ rhs.Val}
		case EQ:
			return &BooleanLiteral{Val: lhs.Val == rhs.Val}
		case NEQ:
			return &BooleanLiteral{Val: lhs.Val != rhs.Val}
		case GT:
			return &BooleanLiteral{Val: lhs.Val > rhs.Val}
		case GTE:
			return &BooleanLiteral{Val: lhs.Val >= rhs.Val}
		case LT:
			return &BooleanLiteral{Val: lhs.Val < rhs.Val}
		case LTE:
			return &BooleanLiteral{Val: lhs.Val <= rhs.Val}
		}
	case *UnsignedLiteral:
		// Comparisons between an unsigned and integer literal will not involve
		// a cast if the integer is negative as that will have an improper result.
		// Look for those situations here.
		if lhs.Val < 0 {
			switch op {
			case LT, LTE:
				return &BooleanLiteral{Val: true}
			case GT, GTE:
				return &BooleanLiteral{Val: false}
			}
		}
		return reduceBinaryExprUnsignedLHS(op, &UnsignedLiteral{Val: uint64(lhs.Val)}, rhs)
	case *DurationLiteral:
		// Treat the integer as a timestamp.
		switch op {
		case ADD:
			return &TimeLiteral{Val: time.Unix(0, lhs.Val).Add(rhs.Val)}
		case SUB:
			return &TimeLiteral{Val: time.Unix(0, lhs.Val).Add(-rhs.Val)}
		}
	case *TimeLiteral:
		d := &DurationLiteral{Val: time.Duration(lhs.Val)}
		expr := reduceBinaryExprDurationLHS(op, d, rhs, loc)
		if _, ok := expr.(*BinaryExpr); !ok {
			return expr
		}
	case *StringLiteral:
		t, err := rhs.ToTimeLiteral(loc)
		if err != nil {
			break
		}
		d := &DurationLiteral{Val: time.Duration(lhs.Val)}
		expr := reduceBinaryExprDurationLHS(op, d, t, loc)
		if _, ok := expr.(*BinaryExpr); !ok {
			return expr
		}
	case *NilLiteral:
		return &BooleanLiteral{Val: false}
	}
	return &BinaryExpr{Op: op, LHS: lhs, RHS: rhs}
}

// reduceBinaryExprUnsignedLHS folds expressions with an unsigned literal LHS.
func reduceBinaryExprUnsignedLHS(op Token, lhs *UnsignedLiteral, rhs Expr) Expr {
	switch rhs := rhs.(type) {
	case *NumberLiteral:
		return reduceBinaryExprNumberLHS(op, &NumberLiteral{Val: float64(lhs.Val)}, rhs)
	case *IntegerLiteral:
		// Comparisons between an unsigned and integer literal will not involve
		// a cast if the integer is negative as that will have an improper result.
		// Look for those situations here.
		if rhs.Val < 0 {
			switch op {
			case LT, LTE:
				return &BooleanLiteral{Val: false}
			case GT, GTE:
				return &BooleanLiteral{Val: true}
			}
		}
		return reduceBinaryExprUnsignedLHS(op, lhs, &UnsignedLiteral{Val: uint64(rhs.Val)})
	case *UnsignedLiteral:
		switch op {
		case ADD:
			return &UnsignedLiteral{Val: lhs.Val + rhs.Val}
		case SUB:
			return &UnsignedLiteral{Val: lhs.Val - rhs.Val}
		case MUL:
			return &UnsignedLiteral{Val: lhs.Val * rhs.Val}
		case DIV:
			if rhs.Val == 0 {
				return &UnsignedLiteral{Val: 0}
			}
			return &UnsignedLiteral{Val: lhs.Val / rhs.Val}
		case MOD:
			if rhs.Val == 0 {
				return &UnsignedLiteral{Val: 0}
			}
			return &UnsignedLiteral{Val: lhs.Val % rhs.Val}
		case EQ:
			return &BooleanLiteral{Val: lhs.Val == rhs.Val}
		case NEQ:
			return &BooleanLiteral{Val: lhs.Val != rhs.Val}
		case GT:
			return &BooleanLiteral{Val: lhs.Val > rhs.Val}
		case GTE:
			return &BooleanLiteral{Val: lhs.Val >= rhs.Val}
		case LT:
			return &BooleanLiteral{Val: lhs.Val < rhs.Val}
		case LTE:
			return &BooleanLiteral{Val: lhs.Val <= rhs.Val}
		}
	}
	return &BinaryExpr{Op: op, LHS: lhs, RHS: rhs}
}

// reduceBinaryExprNilLHS folds expressions with a nil literal LHS.
func reduceBinaryExprNilLHS(op Token, lhs *NilLiteral, rhs Expr) Expr {
	switch op {
	case EQ, NEQ:
		return &BooleanLiteral{Val: false}
	}
	return &BinaryExpr{Op: op, LHS: lhs, RHS: rhs}
}

// reduceBinaryExprNumberLHS folds expressions with a float literal LHS.
func reduceBinaryExprNumberLHS(op Token, lhs *NumberLiteral, rhs Expr) Expr {
	switch rhs := rhs.(type) {
	case *NumberLiteral:
		switch op {
		case ADD:
			return &NumberLiteral{Val: lhs.Val + rhs.Val}
		case SUB:
			return &NumberLiteral{Val: lhs.Val - rhs.Val}
		case MUL:
			return &NumberLiteral{Val: lhs.Val * rhs.Val}
		case DIV:
			if rhs.Val == 0 {
				return &NumberLiteral{Val: 0}
			}
			return &NumberLiteral{Val: lhs.Val / rhs.Val}
		case MOD:
			return &NumberLiteral{Val: math.Mod(lhs.Val, rhs.Val)}
		case EQ:
			return &BooleanLiteral{Val: lhs.Val == rhs.Val}
		case NEQ:
			return &BooleanLiteral{Val: lhs.Val != rhs.Val}
		case GT:
			return &BooleanLiteral{Val: lhs.Val > rhs.Val}
		case GTE:
			return &BooleanLiteral{Val: lhs.Val >= rhs.Val}
		case LT:
			return &BooleanLiteral{Val: lhs.Val < rhs.Val}
		case LTE:
			return &BooleanLiteral{Val: lhs.Val <= rhs.Val}
		}
	case *IntegerLiteral:
		switch op {
		case ADD:
			return &NumberLiteral{Val: lhs.Val + float64(rhs.Val)}
		case SUB:
			return &NumberLiteral{Val: lhs.Val - float64(rhs.Val)}
		case MUL:
			return &NumberLiteral{Val: lhs.Val * float64(rhs.Val)}
		case DIV:
			if float64(rhs.Val) == 0 {
				return &NumberLiteral{Val: 0}
			}
			return &NumberLiteral{Val: lhs.Val / float64(rhs.Val)}
		case MOD:
			return &NumberLiteral{Val: math.Mod(lhs.Val, float64(rhs.Val))}
		case EQ:
			return &BooleanLiteral{Val: lhs.Val == float64(rhs.Val)}
		case NEQ:
			return &BooleanLiteral{Val: lhs.Val != float64(rhs.Val)}
		case GT:
			return &BooleanLiteral{Val: lhs.Val > float64(rhs.Val)}
		case GTE:
			return &BooleanLiteral{Val: lhs.Val >= float64(rhs.Val)}
		case LT:
			return &BooleanLiteral{Val: lhs.Val < float64(rhs.Val)}
		case LTE:
			return &BooleanLiteral{Val: lhs.Val <= float64(rhs.Val)}
		}
	case *UnsignedLiteral:
		return reduceBinaryExprNumberLHS(op, lhs, &NumberLiteral{Val: float64(rhs.Val)})
	case *NilLiteral:
		return &BooleanLiteral{Val: false}
	}
	return &BinaryExpr{Op: op, LHS: lhs, RHS: rhs}
}

// reduceBinaryExprStringLHS folds expressions with a string literal LHS,
// which may also act as a time literal in time-comparison contexts.
func reduceBinaryExprStringLHS(op Token, lhs *StringLiteral, rhs Expr, loc *time.Location) Expr {
	switch rhs := rhs.(type) {
	case *StringLiteral:
		switch op {
		case EQ:
			var expr Expr = &BooleanLiteral{Val: lhs.Val == rhs.Val}
			// This might be a comparison between time literals.
			// If it is, parse the time literals and then compare since it
			// could be a different result if they use different formats
			// for the same time.
			if lhs.IsTimeLiteral() && rhs.IsTimeLiteral() {
				tlhs, err := lhs.ToTimeLiteral(loc)
				if err != nil {
					return expr
				}

				trhs, err := rhs.ToTimeLiteral(loc)
				if err != nil {
					return expr
				}

				t := reduceBinaryExprTimeLHS(op, tlhs, trhs, loc)
				if _, ok := t.(*BinaryExpr); !ok {
					expr = t
				}
			}
			return expr
		case NEQ:
			var expr Expr = &BooleanLiteral{Val: lhs.Val != rhs.Val}
			// This might be a comparison between time literals.
			// If it is, parse the time literals and then compare since it
			// could be a different result if they use different formats
			// for the same time.
			if lhs.IsTimeLiteral() && rhs.IsTimeLiteral() {
				tlhs, err := lhs.ToTimeLiteral(loc)
				if err != nil {
					return expr
				}

				trhs, err := rhs.ToTimeLiteral(loc)
				if err != nil {
					return expr
				}

				t := reduceBinaryExprTimeLHS(op, tlhs, trhs, loc)
				if _, ok := t.(*BinaryExpr); !ok {
					expr = t
				}
			}
			return expr
		case ADD:
			return &StringLiteral{Val: lhs.Val + rhs.Val}
		default:
			// Attempt to convert the string literal to a time literal.
			t, err := lhs.ToTimeLiteral(loc)
			if err != nil {
				break
			}
			expr := reduceBinaryExprTimeLHS(op, t, rhs, loc)

			// If the returned expression is still a binary expr, that means
			// we couldn't reduce it so this wasn't used in a time literal context.
			if _, ok := expr.(*BinaryExpr); !ok {
				return expr
			}
		}
	case *DurationLiteral:
		// Attempt to convert the string literal to a time literal.
		t, err := lhs.ToTimeLiteral(loc)
		if err != nil {
			break
		}
		expr := reduceBinaryExprTimeLHS(op, t, rhs, loc)

		// If the returned expression is still a binary expr, that means
		// we couldn't reduce it so this wasn't used in a time literal context.
		if _, ok := expr.(*BinaryExpr); !ok {
			return expr
		}
	case *TimeLiteral:
		// Attempt to convert the string literal to a time literal.
		t, err := lhs.ToTimeLiteral(loc)
		if err != nil {
			break
		}
		expr := reduceBinaryExprTimeLHS(op, t, rhs, loc)

		// If the returned expression is still a binary expr, that means
		// we couldn't reduce it so this wasn't used in a time literal context.
		if _, ok := expr.(*BinaryExpr); !ok {
			return expr
		}
	case *IntegerLiteral:
		// Attempt to convert the string literal to a time literal.
		t, err := lhs.ToTimeLiteral(loc)
		if err != nil {
			break
		}
		expr := reduceBinaryExprTimeLHS(op, t, rhs, loc)

		// If the returned expression is still a binary expr, that means
		// we couldn't reduce it so this wasn't used in a time literal context.
		if _, ok := expr.(*BinaryExpr); !ok {
			return expr
		}
	case *NilLiteral:
		switch op {
		case EQ, NEQ:
			return &BooleanLiteral{Val: false}
		}
	}
	return &BinaryExpr{Op: op, LHS: lhs, RHS: rhs}
}

// reduceBinaryExprTimeLHS folds expressions with a time literal LHS.
func reduceBinaryExprTimeLHS(op Token, lhs *TimeLiteral, rhs Expr, loc *time.Location) Expr {
	switch rhs := rhs.(type) {
	case *DurationLiteral:
		switch op {
		case ADD:
			return &TimeLiteral{Val: lhs.Val.Add(rhs.Val)}
		case SUB:
			return &TimeLiteral{Val: lhs.Val.Add(-rhs.Val)}
		}
	case *IntegerLiteral:
		// Treat the integer as a duration in nanoseconds.
		d := &DurationLiteral{Val: time.Duration(rhs.Val)}
		expr := reduceBinaryExprTimeLHS(op, lhs, d, loc)
		if _, ok := expr.(*BinaryExpr); !ok {
			return expr
		}
	case *TimeLiteral:
		switch op {
		case SUB:
			return &DurationLiteral{Val: lhs.Val.Sub(rhs.Val)}
		case EQ:
			return &BooleanLiteral{Val: lhs.Val.Equal(rhs.Val)}
		case NEQ:
			return &BooleanLiteral{Val: !lhs.Val.Equal(rhs.Val)}
		case GT:
			return &BooleanLiteral{Val: lhs.Val.After(rhs.Val)}
		case GTE:
			return &BooleanLiteral{Val: lhs.Val.After(rhs.Val) || lhs.Val.Equal(rhs.Val)}
		case LT:
			return &BooleanLiteral{Val: lhs.Val.Before(rhs.Val)}
		case LTE:
			return &BooleanLiteral{Val: lhs.Val.Before(rhs.Val) || lhs.Val.Equal(rhs.Val)}
		}
	case *StringLiteral:
		t, err := rhs.ToTimeLiteral(loc)
		if err != nil {
			break
		}
		expr := reduceBinaryExprTimeLHS(op, lhs, t, loc)

		// If the returned expression is still a binary expr, that means
		// we couldn't reduce it so this wasn't used in a time literal context.
		if _, ok := expr.(*BinaryExpr); !ok {
			return expr
		}
	case *NilLiteral:
		return &BooleanLiteral{Val: false}
	}
	return &BinaryExpr{Op: op, LHS: lhs, RHS: rhs}
}

func reduceCall(expr *Call, valuer Valuer) Expr {
	// Otherwise reduce arguments.
	var args []Expr
	literalsOnly := true
	if len(expr.Args) > 0 {
		args = make([]Expr, len(expr.Args))
		for i, arg := range expr.Args {
			args[i] = reduce(arg, valuer)
			if !isLiteral(args[i]) {
				literalsOnly = false
			}
		}
	}

	// Evaluate a function call if the valuer is a CallValuer and
	// the arguments are only literals.
if literalsOnly { if valuer, ok := valuer.(CallValuer); ok { argVals := make([]interface{}, len(args)) for i := range args { argVals[i] = Eval(args[i], nil) } if v, ok := valuer.Call(expr.Name, argVals); ok { return asLiteral(v) } } } return &Call{Name: expr.Name, Args: args} } func reduceParenExpr(expr *ParenExpr, valuer Valuer) Expr { subexpr := reduce(expr.Expr, valuer) if subexpr, ok := subexpr.(*BinaryExpr); ok { return &ParenExpr{Expr: subexpr} } return subexpr } func reduceVarRef(expr *VarRef, valuer Valuer) Expr { // Ignore if there is no valuer. if valuer == nil { return &VarRef{Val: expr.Val, Type: expr.Type} } // Retrieve the value of the ref. // Ignore if the value doesn't exist. v, ok := valuer.Value(expr.Val) if !ok { return &VarRef{Val: expr.Val, Type: expr.Type} } // Return the value as a literal. return asLiteral(v) } // asLiteral takes an interface and converts it into an influxql literal. func asLiteral(v interface{}) Literal { switch v := v.(type) { case bool: return &BooleanLiteral{Val: v} case time.Duration: return &DurationLiteral{Val: v} case float64: return &NumberLiteral{Val: v} case int64: return &IntegerLiteral{Val: v} case string: return &StringLiteral{Val: v} case time.Time: return &TimeLiteral{Val: v} default: return &NilLiteral{} } } // isLiteral returns if the expression is a literal. func isLiteral(expr Expr) bool { _, ok := expr.(Literal) return ok } // Valuer is the interface that wraps the Value() method. type Valuer interface { // Value returns the value and existence flag for a given key. Value(key string) (interface{}, bool) } // CallValuer implements the Call method for evaluating function calls. type CallValuer interface { Valuer // Call is invoked to evaluate a function call (if possible). Call(name string, args []interface{}) (interface{}, bool) } // ZoneValuer is the interface that specifies the current time zone. type ZoneValuer interface { Valuer // Zone returns the time zone location. 
This function may return nil // if no time zone is known. Zone() *time.Location } var _ CallValuer = (*NowValuer)(nil) var _ ZoneValuer = (*NowValuer)(nil) // NowValuer returns only the value for "now()". type NowValuer struct { Now time.Time Location *time.Location } // Value is a method that returns the value and existence flag for a given key. func (v *NowValuer) Value(key string) (interface{}, bool) { if !v.Now.IsZero() && key == "now()" { return v.Now, true } return nil, false } // Call evaluates the now() function to replace now() with the current time. func (v *NowValuer) Call(name string, args []interface{}) (interface{}, bool) { if name == "now" && len(args) == 0 { return v.Now, true } return nil, false } // Zone is a method that returns the time.Location. func (v *NowValuer) Zone() *time.Location { if v.Location != nil { return v.Location } return nil } // MultiValuer returns a Valuer that iterates over multiple Valuer instances // to find a match. func MultiValuer(valuers ...Valuer) Valuer { return multiValuer(valuers) } type multiValuer []Valuer var _ CallValuer = multiValuer(nil) var _ ZoneValuer = multiValuer(nil) func (a multiValuer) Value(key string) (interface{}, bool) { for _, valuer := range a { if v, ok := valuer.Value(key); ok { return v, true } } return nil, false } func (a multiValuer) Call(name string, args []interface{}) (interface{}, bool) { for _, valuer := range a { if valuer, ok := valuer.(CallValuer); ok { if v, ok := valuer.Call(name, args); ok { return v, true } } } return nil, false } func (a multiValuer) Zone() *time.Location { for _, valuer := range a { if valuer, ok := valuer.(ZoneValuer); ok { if v := valuer.Zone(); v != nil { return v } } } return nil } // ContainsVarRef returns true if expr is a VarRef or contains one. 
func ContainsVarRef(expr Expr) bool { var v containsVarRefVisitor Walk(&v, expr) return v.contains } type containsVarRefVisitor struct { contains bool } func (v *containsVarRefVisitor) Visit(n Node) Visitor { switch n.(type) { case *Call: return nil case *VarRef: v.contains = true } return v } func IsSelector(expr Expr) bool { if call, ok := expr.(*Call); ok { switch call.Name { case "first", "last", "min", "max", "percentile", "sample", "top", "bottom": return true } } return false } // stringSetSlice returns a sorted slice of keys from a string set. func stringSetSlice(m map[string]struct{}) []string { if m == nil { return nil } a := make([]string, 0, len(m)) for k := range m { a = append(a, k) } sort.Strings(a) return a } // TimeRange represents a range of time from Min to Max. The times are inclusive. type TimeRange struct { Min, Max time.Time } // Intersect joins this TimeRange with another TimeRange. func (t TimeRange) Intersect(other TimeRange) TimeRange { if !other.Min.IsZero() { if t.Min.IsZero() || other.Min.After(t.Min) { t.Min = other.Min } } if !other.Max.IsZero() { if t.Max.IsZero() || other.Max.Before(t.Max) { t.Max = other.Max } } return t } // IsZero is true if the min and max of the time range are zero. func (t TimeRange) IsZero() bool { return t.Min.IsZero() && t.Max.IsZero() } // Used by TimeRange methods. var minTime = time.Unix(0, MinTime) var maxTime = time.Unix(0, MaxTime) // MinTime returns the minimum time of the TimeRange. // If the minimum time is zero, this returns the minimum possible time. func (t TimeRange) MinTime() time.Time { if t.Min.IsZero() { return minTime } return t.Min } // MaxTime returns the maximum time of the TimeRange. // If the maximum time is zero, this returns the maximum possible time. func (t TimeRange) MaxTime() time.Time { if t.Max.IsZero() { return maxTime } return t.Max } // MinTimeNano returns the minimum time in nanoseconds since the epoch. 
// If the minimum time is zero, this returns the minimum possible time. func (t TimeRange) MinTimeNano() int64 { if t.Min.IsZero() { return MinTime } return t.Min.UnixNano() } // MaxTimeNano returns the maximum time in nanoseconds since the epoch. // If the maximum time is zero, this returns the maximum possible time. func (t TimeRange) MaxTimeNano() int64 { if t.Max.IsZero() { return MaxTime } return t.Max.UnixNano() } // ConditionExpr extracts the time range and the condition from an expression. // We only support simple time ranges that are constrained with AND and are not nested. // This throws an error when we encounter a time condition that is combined with OR // to prevent returning unexpected results that we do not support. func ConditionExpr(cond Expr, valuer Valuer) (Expr, TimeRange, error) { expr, tr, err := conditionExpr(cond, valuer) // Remove top level parentheses if e, ok := expr.(*ParenExpr); ok { expr = e.Expr } if e, ok := expr.(*BooleanLiteral); ok && e.Val { // If the condition is true, return nil instead to indicate there // is no condition. expr = nil } return expr, tr, err } func conditionExpr(cond Expr, valuer Valuer) (Expr, TimeRange, error) { if cond == nil { return nil, TimeRange{}, nil } switch cond := cond.(type) { case *BinaryExpr: if cond.Op == AND || cond.Op == OR { lhsExpr, lhsTime, err := conditionExpr(cond.LHS, valuer) if err != nil { return nil, TimeRange{}, err } rhsExpr, rhsTime, err := conditionExpr(cond.RHS, valuer) if err != nil { return nil, TimeRange{}, err } // Always intersect the time range even if it makes no sense. // There is no such thing as using OR with a time range. timeRange := lhsTime.Intersect(rhsTime) // Combine the left and right expression. 
if rhsExpr == nil { return lhsExpr, timeRange, nil } else if lhsExpr == nil { return rhsExpr, timeRange, nil } return reduce(&BinaryExpr{ Op: cond.Op, LHS: lhsExpr, RHS: rhsExpr, }, nil), timeRange, nil } // If either the left or the right side is "time", we are looking at // a time range. if lhs, ok := cond.LHS.(*VarRef); ok && strings.ToLower(lhs.Val) == "time" { timeRange, err := getTimeRange(cond.Op, cond.RHS, valuer) return nil, timeRange, err } else if rhs, ok := cond.RHS.(*VarRef); ok && strings.ToLower(rhs.Val) == "time" { // Swap the op for the opposite if it is a comparison. op := cond.Op switch op { case GT: op = LT case LT: op = GT case GTE: op = LTE case LTE: op = GTE } timeRange, err := getTimeRange(op, cond.LHS, valuer) return nil, timeRange, err } return reduce(cond, valuer), TimeRange{}, nil case *ParenExpr: expr, timeRange, err := conditionExpr(cond.Expr, valuer) if err != nil { return nil, TimeRange{}, err } else if expr == nil { return nil, timeRange, nil } return reduce(&ParenExpr{Expr: expr}, nil), timeRange, nil case *BooleanLiteral: return cond, TimeRange{}, nil default: return nil, TimeRange{}, fmt.Errorf("invalid condition expression: %s", cond) } } // getTimeRange returns the time range associated with this comparison. // op is the operation that is used for comparison and rhs is the right hand side // of the expression. The left hand side is always assumed to be "time". func getTimeRange(op Token, rhs Expr, valuer Valuer) (TimeRange, error) { // If literal looks like a date time then parse it as a time literal. if strlit, ok := rhs.(*StringLiteral); ok { if strlit.IsTimeLiteral() { var loc *time.Location if valuer, ok := valuer.(ZoneValuer); ok { loc = valuer.Zone() } t, err := strlit.ToTimeLiteral(loc) if err != nil { return TimeRange{}, err } rhs = t } } // Evaluate the RHS to replace "now()" with the current time. 
rhs = Reduce(rhs, valuer) var value time.Time switch lit := rhs.(type) { case *TimeLiteral: if lit.Val.After(time.Unix(0, MaxTime)) { return TimeRange{}, fmt.Errorf("time %s overflows time literal", lit.Val.Format(time.RFC3339)) } else if lit.Val.Before(time.Unix(0, MinTime+1)) { // The minimum allowable time literal is one greater than the minimum time because the minimum time // is a sentinel value only used internally. return TimeRange{}, fmt.Errorf("time %s underflows time literal", lit.Val.Format(time.RFC3339)) } value = lit.Val case *DurationLiteral: value = time.Unix(0, int64(lit.Val)).UTC() case *NumberLiteral: value = time.Unix(0, int64(lit.Val)).UTC() case *IntegerLiteral: value = time.Unix(0, lit.Val).UTC() default: return TimeRange{}, fmt.Errorf("invalid operation: time and %T are not compatible", lit) } timeRange := TimeRange{} switch op { case GT: timeRange.Min = value.Add(time.Nanosecond) case GTE: timeRange.Min = value case LT: timeRange.Max = value.Add(-time.Nanosecond) case LTE: timeRange.Max = value case EQ: timeRange.Min, timeRange.Max = value, value default: return TimeRange{}, fmt.Errorf("invalid time comparison operator: %s", op) } return timeRange, nil } influxql-1.1.0/ast_test.go000066400000000000000000001772361363177076200155640ustar00rootroot00000000000000package influxql_test import ( "fmt" "go/importer" "math" "reflect" "strings" "testing" "time" "github.com/influxdata/influxql" ) func BenchmarkQuery_String(b *testing.B) { p := influxql.NewParser(strings.NewReader(`SELECT foo AS zoo, a AS b FROM bar WHERE value > 10 AND q = 'hello'`)) q, _ := p.ParseStatement() for i := 0; i < b.N; i++ { _ = q.String() } } // Ensure a value's data type can be retrieved. 
func TestInspectDataType(t *testing.T) { for i, tt := range []struct { v interface{} typ influxql.DataType }{ {float64(100), influxql.Float}, {int64(100), influxql.Integer}, {int32(100), influxql.Integer}, {100, influxql.Integer}, {true, influxql.Boolean}, {"string", influxql.String}, {time.Now(), influxql.Time}, {time.Second, influxql.Duration}, {nil, influxql.Unknown}, } { if typ := influxql.InspectDataType(tt.v); tt.typ != typ { t.Errorf("%d. %v (%s): unexpected type: %s", i, tt.v, tt.typ, typ) continue } } } func TestDataTypeFromString(t *testing.T) { for i, tt := range []struct { s string typ influxql.DataType }{ {s: "float", typ: influxql.Float}, {s: "integer", typ: influxql.Integer}, {s: "unsigned", typ: influxql.Unsigned}, {s: "string", typ: influxql.String}, {s: "boolean", typ: influxql.Boolean}, {s: "time", typ: influxql.Time}, {s: "duration", typ: influxql.Duration}, {s: "tag", typ: influxql.Tag}, {s: "field", typ: influxql.AnyField}, {s: "foobar", typ: influxql.Unknown}, } { if typ := influxql.DataTypeFromString(tt.s); tt.typ != typ { t.Errorf("%d. %s: unexpected type: %s != %s", i, tt.s, tt.typ, typ) } } } func TestDataType_String(t *testing.T) { for i, tt := range []struct { typ influxql.DataType v string }{ {influxql.Float, "float"}, {influxql.Integer, "integer"}, {influxql.Boolean, "boolean"}, {influxql.String, "string"}, {influxql.Time, "time"}, {influxql.Duration, "duration"}, {influxql.Tag, "tag"}, {influxql.Unknown, "unknown"}, } { if v := tt.typ.String(); tt.v != v { t.Errorf("%d. 
%v (%s): unexpected string: %s", i, tt.typ, tt.v, v) } } } func TestDataType_LessThan(t *testing.T) { for i, tt := range []struct { typ influxql.DataType other influxql.DataType exp bool }{ {typ: influxql.Unknown, other: influxql.Unknown, exp: true}, {typ: influxql.Unknown, other: influxql.Float, exp: true}, {typ: influxql.Unknown, other: influxql.Integer, exp: true}, {typ: influxql.Unknown, other: influxql.Unsigned, exp: true}, {typ: influxql.Unknown, other: influxql.String, exp: true}, {typ: influxql.Unknown, other: influxql.Boolean, exp: true}, {typ: influxql.Unknown, other: influxql.Tag, exp: true}, {typ: influxql.Float, other: influxql.Unknown, exp: false}, {typ: influxql.Integer, other: influxql.Unknown, exp: false}, {typ: influxql.Unsigned, other: influxql.Unknown, exp: false}, {typ: influxql.String, other: influxql.Unknown, exp: false}, {typ: influxql.Boolean, other: influxql.Unknown, exp: false}, {typ: influxql.Tag, other: influxql.Unknown, exp: false}, {typ: influxql.Float, other: influxql.Float, exp: false}, {typ: influxql.Float, other: influxql.Integer, exp: false}, {typ: influxql.Float, other: influxql.Unsigned, exp: false}, {typ: influxql.Float, other: influxql.String, exp: false}, {typ: influxql.Float, other: influxql.Boolean, exp: false}, {typ: influxql.Float, other: influxql.Tag, exp: false}, {typ: influxql.Integer, other: influxql.Float, exp: true}, {typ: influxql.Integer, other: influxql.Integer, exp: false}, {typ: influxql.Integer, other: influxql.Unsigned, exp: false}, {typ: influxql.Integer, other: influxql.String, exp: false}, {typ: influxql.Integer, other: influxql.Boolean, exp: false}, {typ: influxql.Integer, other: influxql.Tag, exp: false}, {typ: influxql.Unsigned, other: influxql.Float, exp: true}, {typ: influxql.Unsigned, other: influxql.Integer, exp: true}, {typ: influxql.Unsigned, other: influxql.Unsigned, exp: false}, {typ: influxql.Unsigned, other: influxql.String, exp: false}, {typ: influxql.Unsigned, other: influxql.Boolean, exp: 
false}, {typ: influxql.Unsigned, other: influxql.Tag, exp: false}, {typ: influxql.String, other: influxql.Float, exp: true}, {typ: influxql.String, other: influxql.Integer, exp: true}, {typ: influxql.String, other: influxql.Unsigned, exp: true}, {typ: influxql.String, other: influxql.String, exp: false}, {typ: influxql.String, other: influxql.Boolean, exp: false}, {typ: influxql.String, other: influxql.Tag, exp: false}, {typ: influxql.Boolean, other: influxql.Float, exp: true}, {typ: influxql.Boolean, other: influxql.Integer, exp: true}, {typ: influxql.Boolean, other: influxql.Unsigned, exp: true}, {typ: influxql.Boolean, other: influxql.String, exp: true}, {typ: influxql.Boolean, other: influxql.Boolean, exp: false}, {typ: influxql.Boolean, other: influxql.Tag, exp: false}, {typ: influxql.Tag, other: influxql.Float, exp: true}, {typ: influxql.Tag, other: influxql.Integer, exp: true}, {typ: influxql.Tag, other: influxql.Unsigned, exp: true}, {typ: influxql.Tag, other: influxql.String, exp: true}, {typ: influxql.Tag, other: influxql.Boolean, exp: true}, {typ: influxql.Tag, other: influxql.Tag, exp: false}, } { if got, exp := tt.typ.LessThan(tt.other), tt.exp; got != exp { t.Errorf("%d. %q.LessThan(%q) = %v; exp = %v", i, tt.typ, tt.other, got, exp) } } } // Ensure the SELECT statement can extract GROUP BY interval. 
func TestSelectStatement_GroupByInterval(t *testing.T) { q := "SELECT sum(value) from foo where time < now() GROUP BY time(10m)" stmt, err := influxql.NewParser(strings.NewReader(q)).ParseStatement() if err != nil { t.Fatalf("invalid statement: %q: %s", stmt, err) } s := stmt.(*influxql.SelectStatement) d, err := s.GroupByInterval() if d != 10*time.Minute { t.Fatalf("group by interval not equal:\nexp=%s\ngot=%s", 10*time.Minute, d) } if err != nil { t.Fatalf("error parsing group by interval: %s", err.Error()) } } // Ensure the SELECT statement can have its start and end time set func TestSelectStatement_SetTimeRange(t *testing.T) { q := "SELECT sum(value) from foo where time < now() GROUP BY time(10m)" stmt, err := influxql.NewParser(strings.NewReader(q)).ParseStatement() if err != nil { t.Fatalf("invalid statement: %q: %s", stmt, err) } s := stmt.(*influxql.SelectStatement) start := time.Now().Add(-20 * time.Hour).Round(time.Second).UTC() end := time.Now().Add(10 * time.Hour).Round(time.Second).UTC() s.SetTimeRange(start, end) min, max := MustTimeRange(s.Condition) if min != start { t.Fatalf("start time wasn't set properly.\n exp: %s\n got: %s", start, min) } // the end range is actually one nanosecond before the given one since end is exclusive end = end.Add(-time.Nanosecond) if max != end { t.Fatalf("end time wasn't set properly.\n exp: %s\n got: %s", end, max) } // ensure we can set a time on a select that already has one set start = time.Now().Add(-20 * time.Hour).Round(time.Second).UTC() end = time.Now().Add(10 * time.Hour).Round(time.Second).UTC() q = fmt.Sprintf("SELECT sum(value) from foo WHERE time >= %ds and time <= %ds GROUP BY time(10m)", start.Unix(), end.Unix()) stmt, err = influxql.NewParser(strings.NewReader(q)).ParseStatement() if err != nil { t.Fatalf("invalid statement: %q: %s", stmt, err) } s = stmt.(*influxql.SelectStatement) min, max = MustTimeRange(s.Condition) if start != min || end != max { t.Fatalf("start and end times weren't equal:\n 
exp: %s\n got: %s\n exp: %s\n got:%s\n", start, min, end, max) } // update and ensure it saves it start = time.Now().Add(-40 * time.Hour).Round(time.Second).UTC() end = time.Now().Add(20 * time.Hour).Round(time.Second).UTC() s.SetTimeRange(start, end) min, max = MustTimeRange(s.Condition) // TODO: right now the SetTimeRange can't override the start time if it's more recent than what they're trying to set it to. // shouldn't matter for our purposes with continuous queries, but fix this later if min != start { t.Fatalf("start time wasn't set properly.\n exp: %s\n got: %s", start, min) } // the end range is actually one nanosecond before the given one since end is exclusive end = end.Add(-time.Nanosecond) if max != end { t.Fatalf("end time wasn't set properly.\n exp: %s\n got: %s", end, max) } // ensure that when we set a time range other where clause conditions are still there q = "SELECT sum(value) from foo WHERE foo = 'bar' and time < now() GROUP BY time(10m)" stmt, err = influxql.NewParser(strings.NewReader(q)).ParseStatement() if err != nil { t.Fatalf("invalid statement: %q: %s", stmt, err) } s = stmt.(*influxql.SelectStatement) // update and ensure it saves it start = time.Now().Add(-40 * time.Hour).Round(time.Second).UTC() end = time.Now().Add(20 * time.Hour).Round(time.Second).UTC() s.SetTimeRange(start, end) min, max = MustTimeRange(s.Condition) if min != start { t.Fatalf("start time wasn't set properly.\n exp: %s\n got: %s", start, min) } // the end range is actually one nanosecond before the given one since end is exclusive end = end.Add(-time.Nanosecond) if max != end { t.Fatalf("end time wasn't set properly.\n exp: %s\n got: %s", end, max) } // ensure the where clause is there hasWhere := false influxql.WalkFunc(s.Condition, func(n influxql.Node) { if ex, ok := n.(*influxql.BinaryExpr); ok { if lhs, ok := ex.LHS.(*influxql.VarRef); ok { if lhs.Val == "foo" { if rhs, ok := ex.RHS.(*influxql.StringLiteral); ok { if rhs.Val == "bar" { hasWhere = true } } } } 
} }) if !hasWhere { t.Fatal("set time range cleared out the where clause") } } func TestSelectStatement_HasWildcard(t *testing.T) { var tests = []struct { stmt string wildcard bool }{ // No wildcards { stmt: `SELECT value FROM cpu`, wildcard: false, }, // Query wildcard { stmt: `SELECT * FROM cpu`, wildcard: true, }, // No GROUP BY wildcards { stmt: `SELECT value FROM cpu GROUP BY host`, wildcard: false, }, // No GROUP BY wildcards, time only { stmt: `SELECT mean(value) FROM cpu where time < now() GROUP BY time(5ms)`, wildcard: false, }, // GROUP BY wildcard { stmt: `SELECT value FROM cpu GROUP BY *`, wildcard: true, }, // GROUP BY wildcard with time { stmt: `SELECT mean(value) FROM cpu where time < now() GROUP BY *,time(1m)`, wildcard: true, }, // GROUP BY wildcard with explicit { stmt: `SELECT value FROM cpu GROUP BY *,host`, wildcard: true, }, // GROUP BY multiple wildcards { stmt: `SELECT value FROM cpu GROUP BY *,*`, wildcard: true, }, // Combo { stmt: `SELECT * FROM cpu GROUP BY *`, wildcard: true, }, } for i, tt := range tests { // Parse statement. stmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement() if err != nil { t.Fatalf("invalid statement: %q: %s", tt.stmt, err) } // Test wildcard detection. if w := stmt.(*influxql.SelectStatement).HasWildcard(); tt.wildcard != w { t.Errorf("%d. %q: unexpected wildcard detection:\n\nexp=%v\n\ngot=%v\n\n", i, tt.stmt, tt.wildcard, w) continue } } } // Test SELECT statement field rewrite. 
func TestSelectStatement_RewriteFields(t *testing.T) { var tests = []struct { stmt string rewrite string err string }{ // No wildcards { stmt: `SELECT value FROM cpu`, rewrite: `SELECT value FROM cpu`, }, // Query wildcard { stmt: `SELECT * FROM cpu`, rewrite: `SELECT host::tag, region::tag, value1::float, value2::integer FROM cpu`, }, // Parser fundamentally prohibits multiple query sources // Query wildcard with explicit { stmt: `SELECT *,value1 FROM cpu`, rewrite: `SELECT host::tag, region::tag, value1::float, value2::integer, value1::float FROM cpu`, }, // Query multiple wildcards { stmt: `SELECT *,* FROM cpu`, rewrite: `SELECT host::tag, region::tag, value1::float, value2::integer, host::tag, region::tag, value1::float, value2::integer FROM cpu`, }, // Query wildcards with group by { stmt: `SELECT * FROM cpu GROUP BY host`, rewrite: `SELECT region::tag, value1::float, value2::integer FROM cpu GROUP BY host`, }, // No GROUP BY wildcards { stmt: `SELECT value FROM cpu GROUP BY host`, rewrite: `SELECT value FROM cpu GROUP BY host`, }, // No GROUP BY wildcards, time only { stmt: `SELECT mean(value) FROM cpu where time < now() GROUP BY time(5ms)`, rewrite: `SELECT mean(value) FROM cpu WHERE time < now() GROUP BY time(5ms)`, }, // GROUP BY wildcard { stmt: `SELECT value FROM cpu GROUP BY *`, rewrite: `SELECT value FROM cpu GROUP BY host, region`, }, // GROUP BY wildcard with time { stmt: `SELECT mean(value) FROM cpu where time < now() GROUP BY *,time(1m)`, rewrite: `SELECT mean(value) FROM cpu WHERE time < now() GROUP BY host, region, time(1m)`, }, // GROUP BY wildcard with fill { stmt: `SELECT mean(value) FROM cpu where time < now() GROUP BY *,time(1m) fill(0)`, rewrite: `SELECT mean(value) FROM cpu WHERE time < now() GROUP BY host, region, time(1m) fill(0)`, }, // GROUP BY wildcard with explicit { stmt: `SELECT value FROM cpu GROUP BY *,host`, rewrite: `SELECT value FROM cpu GROUP BY host, region, host`, }, // GROUP BY multiple wildcards { stmt: `SELECT value FROM 
cpu GROUP BY *,*`, rewrite: `SELECT value FROM cpu GROUP BY host, region, host, region`, }, // Combo { stmt: `SELECT * FROM cpu GROUP BY *`, rewrite: `SELECT value1::float, value2::integer FROM cpu GROUP BY host, region`, }, // Wildcard function with all fields. { stmt: `SELECT mean(*) FROM cpu`, rewrite: `SELECT mean(value1::float) AS mean_value1, mean(value2::integer) AS mean_value2 FROM cpu`, }, { stmt: `SELECT distinct(*) FROM strings`, rewrite: `SELECT distinct(string::string) AS distinct_string, distinct(value::float) AS distinct_value FROM strings`, }, { stmt: `SELECT distinct(*) FROM bools`, rewrite: `SELECT distinct(bool::boolean) AS distinct_bool, distinct(value::float) AS distinct_value FROM bools`, }, // Wildcard function with some fields excluded. { stmt: `SELECT mean(*) FROM strings`, rewrite: `SELECT mean(value::float) AS mean_value FROM strings`, }, { stmt: `SELECT mean(*) FROM bools`, rewrite: `SELECT mean(value::float) AS mean_value FROM bools`, }, // Wildcard function with an alias. 
{ stmt: `SELECT mean(*) AS alias FROM cpu`, rewrite: `SELECT mean(value1::float) AS alias_value1, mean(value2::integer) AS alias_value2 FROM cpu`, }, // Query regex { stmt: `SELECT /1/ FROM cpu`, rewrite: `SELECT value1::float FROM cpu`, }, { stmt: `SELECT value1 FROM cpu GROUP BY /h/`, rewrite: `SELECT value1::float FROM cpu GROUP BY host`, }, // Query regex { stmt: `SELECT mean(/1/) FROM cpu`, rewrite: `SELECT mean(value1::float) AS mean_value1 FROM cpu`, }, // Rewrite subquery { stmt: `SELECT * FROM (SELECT mean(value1) FROM cpu GROUP BY host) GROUP BY *`, rewrite: `SELECT mean::float FROM (SELECT mean(value1::float) FROM cpu GROUP BY host) GROUP BY host`, }, // Invalid queries that can't be rewritten should return an error (to // avoid a panic in the query engine) { stmt: `SELECT count(*) / 2 FROM cpu`, err: `unsupported expression with wildcard: count(*) / 2`, }, { stmt: `SELECT * / 2 FROM (SELECT count(*) FROM cpu)`, err: `unsupported expression with wildcard: * / 2`, }, { stmt: `SELECT count(/value/) / 2 FROM cpu`, err: `unsupported expression with regex field: count(/value/) / 2`, }, // This one should be possible though since there's no wildcard in the // binary expression. { stmt: `SELECT value1 + value2, * FROM cpu`, rewrite: `SELECT value1::float + value2::integer, host::tag, region::tag, value1::float, value2::integer FROM cpu`, }, { stmt: `SELECT value1 + value2, /value/ FROM cpu`, rewrite: `SELECT value1::float + value2::integer, value1::float, value2::integer FROM cpu`, }, } for i, tt := range tests { // Parse statement. 
stmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement() if err != nil { t.Fatalf("invalid statement: %q: %s", tt.stmt, err) } var mapper FieldMapper mapper.FieldDimensionsFn = func(m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { switch m.Name { case "cpu": fields = map[string]influxql.DataType{ "value1": influxql.Float, "value2": influxql.Integer, } case "strings": fields = map[string]influxql.DataType{ "value": influxql.Float, "string": influxql.String, } case "bools": fields = map[string]influxql.DataType{ "value": influxql.Float, "bool": influxql.Boolean, } } dimensions = map[string]struct{}{"host": struct{}{}, "region": struct{}{}} return } // Rewrite statement. rw, err := stmt.(*influxql.SelectStatement).RewriteFields(&mapper) if tt.err != "" { if err != nil && err.Error() != tt.err { t.Errorf("%d. %q: unexpected error: %s != %s", i, tt.stmt, err.Error(), tt.err) } else if err == nil { t.Errorf("%d. %q: expected error", i, tt.stmt) } } else { if err != nil { t.Errorf("%d. %q: error: %s", i, tt.stmt, err) } else if rw == nil && tt.err == "" { t.Errorf("%d. %q: unexpected nil statement", i, tt.stmt) } else if rw := rw.String(); tt.rewrite != rw { t.Errorf("%d. %q: unexpected rewrite:\n\nexp=%s\n\ngot=%s\n\n", i, tt.stmt, tt.rewrite, rw) } } } } // Test SELECT statement regex conditions rewrite. 
func TestSelectStatement_RewriteRegexConditions(t *testing.T) { var tests = []struct { in string out string }{ {in: `SELECT value FROM cpu`, out: `SELECT value FROM cpu`}, {in: `SELECT value FROM cpu WHERE host = 'server-1'`, out: `SELECT value FROM cpu WHERE host = 'server-1'`}, {in: `SELECT value FROM cpu WHERE host = 'server-1'`, out: `SELECT value FROM cpu WHERE host = 'server-1'`}, {in: `SELECT value FROM cpu WHERE host != 'server-1'`, out: `SELECT value FROM cpu WHERE host != 'server-1'`}, // Non matching regex {in: `SELECT value FROM cpu WHERE host =~ /server-1|server-2|server-3/`, out: `SELECT value FROM cpu WHERE host =~ /server-1|server-2|server-3/`}, {in: `SELECT value FROM cpu WHERE host =~ /server-1/`, out: `SELECT value FROM cpu WHERE host =~ /server-1/`}, {in: `SELECT value FROM cpu WHERE host !~ /server-1/`, out: `SELECT value FROM cpu WHERE host !~ /server-1/`}, {in: `SELECT value FROM cpu WHERE host =~ /^server-1/`, out: `SELECT value FROM cpu WHERE host =~ /^server-1/`}, {in: `SELECT value FROM cpu WHERE host =~ /server-1$/`, out: `SELECT value FROM cpu WHERE host =~ /server-1$/`}, {in: `SELECT value FROM cpu WHERE host !~ /\^server-1$/`, out: `SELECT value FROM cpu WHERE host !~ /\^server-1$/`}, {in: `SELECT value FROM cpu WHERE host !~ /\^$/`, out: `SELECT value FROM cpu WHERE host !~ /\^$/`}, {in: `SELECT value FROM cpu WHERE host !~ /^server-1\$/`, out: `SELECT value FROM cpu WHERE host !~ /^server-1\$/`}, {in: `SELECT value FROM cpu WHERE host =~ /^\$/`, out: `SELECT value FROM cpu WHERE host =~ /^\$/`}, {in: `SELECT value FROM cpu WHERE host !~ /^a/`, out: `SELECT value FROM cpu WHERE host !~ /^a/`}, // These regexes are not supported due to the presence of escaped or meta characters. 
{in: `SELECT value FROM cpu WHERE host !~ /^?a$/`, out: `SELECT value FROM cpu WHERE host !~ /^?a$/`}, {in: `SELECT value FROM cpu WHERE host !~ /^a*$/`, out: `SELECT value FROM cpu WHERE host !~ /^a*$/`}, {in: `SELECT value FROM cpu WHERE host !~ /^a.b$/`, out: `SELECT value FROM cpu WHERE host !~ /^a.b$/`}, {in: `SELECT value FROM cpu WHERE host !~ /^ab+$/`, out: `SELECT value FROM cpu WHERE host !~ /^ab+$/`}, // These regexes are not supported due to the presence of unsupported regex flags. {in: `SELECT value FROM cpu WHERE host =~ /(?i)^SeRvEr01$/`, out: `SELECT value FROM cpu WHERE host =~ /(?i)^SeRvEr01$/`}, // These regexes are not supported due to large character class(es). {in: `SELECT value FROM cpu WHERE host =~ /^[^abcd]$/`, out: `SELECT value FROM cpu WHERE host =~ /^[^abcd]$/`}, // These regexes all match and will be rewritten. {in: `SELECT value FROM cpu WHERE host !~ /^a[2]$/`, out: `SELECT value FROM cpu WHERE host != 'a2'`}, {in: `SELECT value FROM cpu WHERE host =~ /^server-1$/`, out: `SELECT value FROM cpu WHERE host = 'server-1'`}, {in: `SELECT value FROM cpu WHERE host !~ /^server-1$/`, out: `SELECT value FROM cpu WHERE host != 'server-1'`}, {in: `SELECT value FROM cpu WHERE host =~ /^server 1$/`, out: `SELECT value FROM cpu WHERE host = 'server 1'`}, {in: `SELECT value FROM cpu WHERE host =~ /^$/`, out: `SELECT value FROM cpu WHERE host = ''`}, {in: `SELECT value FROM cpu WHERE host !~ /^$/`, out: `SELECT value FROM cpu WHERE host != ''`}, {in: `SELECT value FROM cpu WHERE host =~ /^server-1$/ OR host =~ /^server-2$/`, out: `SELECT value FROM cpu WHERE host = 'server-1' OR host = 'server-2'`}, {in: `SELECT value FROM cpu WHERE host =~ /^server-1$/ OR host =~ /^server]a$/`, out: `SELECT value FROM cpu WHERE host = 'server-1' OR host = 'server]a'`}, {in: `SELECT value FROM cpu WHERE host =~ /^hello\?$/`, out: `SELECT value FROM cpu WHERE host = 'hello?'`}, {in: `SELECT value FROM cpu WHERE host !~ /^\\$/`, out: `SELECT value FROM cpu WHERE host 
!= '\\'`}, {in: `SELECT value FROM cpu WHERE host !~ /^\\\$$/`, out: `SELECT value FROM cpu WHERE host != '\\$'`}, // This is supported, but annoying to write and the below queries satisfy this condition. //{in: `SELECT value FROM cpu WHERE host =~ /^hello\world$/`, out: `SELECT value FROM cpu WHERE host =~ /^hello\world$/`}, {in: `SELECT value FROM cpu WHERE host =~ /^(server-1|server-2|server-3)$/`, out: `SELECT value FROM cpu WHERE host = 'server-1' OR host = 'server-2' OR host = 'server-3'`}, {in: `SELECT value FROM cpu WHERE host !~ /^(foo|bar)$/`, out: `SELECT value FROM cpu WHERE host != 'foo' AND host != 'bar'`}, {in: `SELECT value FROM cpu WHERE host !~ /^\d$/`, out: `SELECT value FROM cpu WHERE host != '0' AND host != '1' AND host != '2' AND host != '3' AND host != '4' AND host != '5' AND host != '6' AND host != '7' AND host != '8' AND host != '9'`}, {in: `SELECT value FROM cpu WHERE host !~ /^[a-z]$/`, out: `SELECT value FROM cpu WHERE host != 'a' AND host != 'b' AND host != 'c' AND host != 'd' AND host != 'e' AND host != 'f' AND host != 'g' AND host != 'h' AND host != 'i' AND host != 'j' AND host != 'k' AND host != 'l' AND host != 'm' AND host != 'n' AND host != 'o' AND host != 'p' AND host != 'q' AND host != 'r' AND host != 's' AND host != 't' AND host != 'u' AND host != 'v' AND host != 'w' AND host != 'x' AND host != 'y' AND host != 'z'`}, {in: `SELECT value FROM cpu WHERE host =~ /^[ab]{3}$/`, out: `SELECT value FROM cpu WHERE host = 'aaa' OR host = 'aab' OR host = 'aba' OR host = 'abb' OR host = 'baa' OR host = 'bab' OR host = 'bba' OR host = 'bbb'`}, } for i, test := range tests { stmt, err := influxql.NewParser(strings.NewReader(test.in)).ParseStatement() if err != nil { t.Fatalf("[Example %d], %v", i, err) } // Rewrite any supported regex conditions. stmt.(*influxql.SelectStatement).RewriteRegexConditions() // Get the expected rewritten statement. 
expStmt, err := influxql.NewParser(strings.NewReader(test.out)).ParseStatement() if err != nil { t.Fatalf("[Example %d], %v", i, err) } // Compare the (potentially) rewritten AST to the expected AST. if got, exp := stmt, expStmt; !reflect.DeepEqual(got, exp) { t.Errorf("[Example %d]\nattempting %v\ngot %v\n%s\n\nexpected %v\n%s\n", i+1, test.in, got, mustMarshalJSON(got), exp, mustMarshalJSON(exp)) } } } // Test SELECT statement time field rewrite. func TestSelectStatement_RewriteTimeFields(t *testing.T) { var tests = []struct { s string stmt influxql.Statement }{ { s: `SELECT time, field1 FROM cpu`, stmt: &influxql.SelectStatement{ IsRawQuery: true, Fields: []*influxql.Field{ {Expr: &influxql.VarRef{Val: "field1"}}, }, Sources: []influxql.Source{ &influxql.Measurement{Name: "cpu"}, }, }, }, { s: `SELECT time AS timestamp, field1 FROM cpu`, stmt: &influxql.SelectStatement{ IsRawQuery: true, Fields: []*influxql.Field{ {Expr: &influxql.VarRef{Val: "field1"}}, }, Sources: []influxql.Source{ &influxql.Measurement{Name: "cpu"}, }, TimeAlias: "timestamp", }, }, } for i, tt := range tests { // Parse statement. stmt, err := influxql.NewParser(strings.NewReader(tt.s)).ParseStatement() if err != nil { t.Fatalf("invalid statement: %q: %s", tt.s, err) } // Rewrite statement. stmt.(*influxql.SelectStatement).RewriteTimeFields() if !reflect.DeepEqual(tt.stmt, stmt) { t.Logf("\n# %s\nexp=%s\ngot=%s\n", tt.s, mustMarshalJSON(tt.stmt), mustMarshalJSON(stmt)) t.Logf("\nSQL exp=%s\nSQL got=%s\n", tt.stmt.String(), stmt.String()) t.Errorf("%d. 
%q\n\nstmt mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.s, tt.stmt, stmt) } } } // Ensure that the IsRawQuery flag gets set properly func TestSelectStatement_IsRawQuerySet(t *testing.T) { var tests = []struct { stmt string isRaw bool }{ { stmt: "select * from foo", isRaw: true, }, { stmt: "select value1,value2 from foo", isRaw: true, }, { stmt: "select value1,value2 from foo, time(10m)", isRaw: true, }, { stmt: "select mean(value) from foo where time < now() group by time(5m)", isRaw: false, }, { stmt: "select mean(value) from foo group by bar", isRaw: false, }, { stmt: "select mean(value) from foo group by *", isRaw: false, }, { stmt: "select mean(value) from foo group by *", isRaw: false, }, } for _, tt := range tests { s := MustParseSelectStatement(tt.stmt) if s.IsRawQuery != tt.isRaw { t.Errorf("'%s', IsRawQuery should be %v", tt.stmt, tt.isRaw) } } } // Ensure binary expression names can be evaluated. func TestBinaryExprName(t *testing.T) { for i, tt := range []struct { expr string name string }{ {expr: `value + 1`, name: `value`}, {expr: `"user" / total`, name: `user_total`}, {expr: `("user" + total) / total`, name: `user_total_total`}, } { expr := influxql.MustParseExpr(tt.expr) switch expr := expr.(type) { case *influxql.BinaryExpr: name := influxql.BinaryExprName(expr) if name != tt.name { t.Errorf("%d. unexpected name %s, got %s", i, name, tt.name) } default: t.Errorf("%d. 
unexpected expr type: %T", i, expr) } } } func TestConditionExpr(t *testing.T) { mustParseTime := func(value string) time.Time { ts, err := time.Parse(time.RFC3339, value) if err != nil { t.Fatalf("unable to parse time: %s", err) } return ts } now := mustParseTime("2000-01-01T00:00:00Z") valuer := influxql.NowValuer{Now: now} for _, tt := range []struct { s string cond string min, max time.Time err string }{ {s: `host = 'server01'`, cond: `host = 'server01'`}, {s: `time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T01:00:00Z'`, min: mustParseTime("2000-01-01T00:00:00Z"), max: mustParseTime("2000-01-01T01:00:00Z").Add(-1)}, {s: `host = 'server01' AND (region = 'uswest' AND time >= now() - 10m)`, cond: `host = 'server01' AND (region = 'uswest')`, min: mustParseTime("1999-12-31T23:50:00Z")}, {s: `(host = 'server01' AND region = 'uswest') AND time >= now() - 10m`, cond: `host = 'server01' AND region = 'uswest'`, min: mustParseTime("1999-12-31T23:50:00Z")}, {s: `host = 'server01' AND (time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T01:00:00Z')`, cond: `host = 'server01'`, min: mustParseTime("2000-01-01T00:00:00Z"), max: mustParseTime("2000-01-01T01:00:00Z").Add(-1)}, {s: `(time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T01:00:00Z') AND host = 'server01'`, cond: `host = 'server01'`, min: mustParseTime("2000-01-01T00:00:00Z"), max: mustParseTime("2000-01-01T01:00:00Z").Add(-1)}, {s: `'2000-01-01T00:00:00Z' <= time AND '2000-01-01T01:00:00Z' > time`, min: mustParseTime("2000-01-01T00:00:00Z"), max: mustParseTime("2000-01-01T01:00:00Z").Add(-1)}, {s: `'2000-01-01T00:00:00Z' < time AND '2000-01-01T01:00:00Z' >= time`, min: mustParseTime("2000-01-01T00:00:00Z").Add(1), max: mustParseTime("2000-01-01T01:00:00Z")}, {s: `time = '2000-01-01T00:00:00Z'`, min: mustParseTime("2000-01-01T00:00:00Z"), max: mustParseTime("2000-01-01T00:00:00Z")}, {s: `time >= 10s`, min: mustParseTime("1970-01-01T00:00:10Z")}, {s: `time >= 10000000000`, min: 
mustParseTime("1970-01-01T00:00:10Z")}, {s: `time >= 10000000000.0`, min: mustParseTime("1970-01-01T00:00:10Z")}, {s: `time > now()`, min: now.Add(1)}, {s: `value`, err: `invalid condition expression: value`}, {s: `4`, err: `invalid condition expression: 4`}, {s: `time >= 'today'`, err: `invalid operation: time and *influxql.StringLiteral are not compatible`}, {s: `time != '2000-01-01T00:00:00Z'`, err: `invalid time comparison operator: !=`}, // This query makes no logical sense, but it's common enough that we pretend // it does. Technically, this should be illegal because the AND has higher precedence // than the OR so the AND only applies to the server02 tag, but a person's intention // is to have it apply to both and previous versions worked that way. {s: `host = 'server01' OR host = 'server02' AND time >= now() - 10m`, cond: `host = 'server01' OR host = 'server02'`, min: mustParseTime("1999-12-31T23:50:00Z")}, // TODO(jsternberg): This should be an error, but we can't because the above query // needs to work. Until we can work a way for the above to work or at least get // a warning message for people to transition to a correct syntax, the bad behavior // stays. 
//{s: `host = 'server01' OR (time >= now() - 10m AND host = 'server02')`, err: `cannot use OR with time conditions`}, {s: `value AND host = 'server01'`, err: `invalid condition expression: value`}, {s: `host = 'server01' OR (value)`, err: `invalid condition expression: value`}, {s: `time > '2262-04-11 23:47:17'`, err: `time 2262-04-11T23:47:17Z overflows time literal`}, {s: `time > '1677-09-20 19:12:43'`, err: `time 1677-09-20T19:12:43Z underflows time literal`}, {s: `true AND (false OR product = 'xyz')`, cond: `product = 'xyz'`, }, {s: `'a' = 'a'`, cond: ``}, {s: `value > 0 OR true`, cond: ``}, {s: `host = 'server01' AND false`, cond: `false`}, {s: `TIME >= '2000-01-01T00:00:00Z'`, min: mustParseTime("2000-01-01T00:00:00Z")}, {s: `'2000-01-01T00:00:00Z' <= TIME`, min: mustParseTime("2000-01-01T00:00:00Z")}, // Remove enclosing parentheses {s: `(host = 'server01')`, cond: `host = 'server01'`}, // Preserve nested parentheses {s: `host = 'server01' AND (region = 'region01' OR region = 'region02')`, cond: `host = 'server01' AND (region = 'region01' OR region = 'region02')`, }, } { t.Run(tt.s, func(t *testing.T) { expr, err := influxql.ParseExpr(tt.s) if err != nil { t.Fatalf("unexpected error: %s", err) } cond, timeRange, err := influxql.ConditionExpr(expr, &valuer) if err != nil { if tt.err == "" { t.Fatalf("unexpected error: %s", err) } else if have, want := err.Error(), tt.err; have != want { t.Fatalf("unexpected error: %s != %s", have, want) } } if cond != nil { if have, want := cond.String(), tt.cond; have != want { t.Errorf("unexpected condition:\nhave=%s\nwant=%s", have, want) } } else { if have, want := "", tt.cond; have != want { t.Errorf("unexpected condition:\nhave=%s\nwant=%s", have, want) } } if have, want := timeRange.Min, tt.min; !have.Equal(want) { t.Errorf("unexpected min time:\nhave=%s\nwant=%s", have, want) } if have, want := timeRange.Max, tt.max; !have.Equal(want) { t.Errorf("unexpected max time:\nhave=%s\nwant=%s", have, want) } }) } } // Ensure 
an AST node can be rewritten. func TestRewrite(t *testing.T) { expr := MustParseExpr(`time > 1 OR foo = 2`) // Flip LHS & RHS in all binary expressions. act := influxql.RewriteFunc(expr, func(n influxql.Node) influxql.Node { switch n := n.(type) { case *influxql.BinaryExpr: return &influxql.BinaryExpr{Op: n.Op, LHS: n.RHS, RHS: n.LHS} default: return n } }) // Verify that everything is flipped. if act := act.String(); act != `2 = foo OR 1 > time` { t.Fatalf("unexpected result: %s", act) } } // Ensure an Expr can be rewritten handling nils. func TestRewriteExpr(t *testing.T) { expr := MustParseExpr(`(time > 1 AND time < 10) OR foo = 2`) // Remove all time expressions. act := influxql.RewriteExpr(expr, func(e influxql.Expr) influxql.Expr { switch e := e.(type) { case *influxql.BinaryExpr: if lhs, ok := e.LHS.(*influxql.VarRef); ok && lhs.Val == "time" { return nil } } return e }) // Verify that everything is flipped. if act := act.String(); act != `foo = 2` { t.Fatalf("unexpected result: %s", act) } } // Ensure that the String() value of a statement is parseable func TestParseString(t *testing.T) { var tests = []struct { stmt string }{ { stmt: `SELECT "cpu load" FROM myseries`, }, { stmt: `SELECT "cpu load" FROM "my series"`, }, { stmt: `SELECT "cpu\"load" FROM myseries`, }, { stmt: `SELECT "cpu'load" FROM myseries`, }, { stmt: `SELECT "cpu load" FROM "my\"series"`, }, { stmt: `SELECT "field with spaces" FROM "\"ugly\" db"."\"ugly\" rp"."\"ugly\" measurement"`, }, { stmt: `SELECT * FROM myseries`, }, { stmt: `DROP DATABASE "!"`, }, { stmt: `DROP RETENTION POLICY "my rp" ON "a database"`, }, { stmt: `CREATE RETENTION POLICY "my rp" ON "a database" DURATION 1d REPLICATION 1`, }, { stmt: `ALTER RETENTION POLICY "my rp" ON "a database" DEFAULT`, }, { stmt: `SHOW RETENTION POLICIES ON "a database"`, }, { stmt: `SHOW TAG VALUES WITH KEY IN ("a long name", short)`, }, { stmt: `DROP CONTINUOUS QUERY "my query" ON "my database"`, }, // See issues 
https://github.com/influxdata/influxdb/issues/1647 // and https://github.com/influxdata/influxdb/issues/4404 //{ // stmt: `DELETE FROM "my db"."my rp"."my measurement"`, //}, { stmt: `DROP SUBSCRIPTION "ugly \"subscription\" name" ON "\"my\" db"."\"my\" rp"`, }, { stmt: `CREATE SUBSCRIPTION "ugly \"subscription\" name" ON "\"my\" db"."\"my\" rp" DESTINATIONS ALL 'my host', 'my other host'`, }, { stmt: `SHOW MEASUREMENTS WITH MEASUREMENT =~ /foo/`, }, { stmt: `SHOW MEASUREMENTS WITH MEASUREMENT = "and/or"`, }, { stmt: `DROP USER "user with spaces"`, }, { stmt: `GRANT ALL PRIVILEGES ON "db with spaces" TO "user with spaces"`, }, { stmt: `GRANT ALL PRIVILEGES TO "user with spaces"`, }, { stmt: `SHOW GRANTS FOR "user with spaces"`, }, { stmt: `REVOKE ALL PRIVILEGES ON "db with spaces" FROM "user with spaces"`, }, { stmt: `REVOKE ALL PRIVILEGES FROM "user with spaces"`, }, { stmt: `CREATE DATABASE "db with spaces"`, }, } for _, tt := range tests { // Parse statement. stmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement() if err != nil { t.Fatalf("invalid statement: %q: %s", tt.stmt, err) } stmtCopy, err := influxql.NewParser(strings.NewReader(stmt.String())).ParseStatement() if err != nil { t.Fatalf("failed to parse string: %v\norig: %v\ngot: %v", err, tt.stmt, stmt.String()) } if !reflect.DeepEqual(stmt, stmtCopy) { t.Fatalf("statement changed after stringifying and re-parsing:\noriginal : %v\nre-parsed: %v\n", tt.stmt, stmtCopy.String()) } } } // Ensure an expression can be reduced. func TestEval(t *testing.T) { for i, tt := range []struct { in string out interface{} data map[string]interface{} }{ // Number literals. 
{in: `1 + 2`, out: int64(3)}, {in: `(foo*2) + ( (4/2) + (3 * 5) - 0.5 )`, out: float64(26.5), data: map[string]interface{}{"foo": float64(5)}}, {in: `foo / 2`, out: float64(2), data: map[string]interface{}{"foo": float64(4)}}, {in: `4 = 4`, out: true}, {in: `4 <> 4`, out: false}, {in: `6 > 4`, out: true}, {in: `4 >= 4`, out: true}, {in: `4 < 6`, out: true}, {in: `4 <= 4`, out: true}, {in: `4 AND 5`, out: nil}, {in: `0 = 'test'`, out: false}, {in: `1.0 = 1`, out: true}, {in: `1.2 = 1`, out: false}, {in: `-1 = 9223372036854775808`, out: false}, {in: `-1 != 9223372036854775808`, out: true}, {in: `-1 < 9223372036854775808`, out: true}, {in: `-1 <= 9223372036854775808`, out: true}, {in: `-1 > 9223372036854775808`, out: false}, {in: `-1 >= 9223372036854775808`, out: false}, {in: `9223372036854775808 = -1`, out: false}, {in: `9223372036854775808 != -1`, out: true}, {in: `9223372036854775808 < -1`, out: false}, {in: `9223372036854775808 <= -1`, out: false}, {in: `9223372036854775808 > -1`, out: true}, {in: `9223372036854775808 >= -1`, out: true}, {in: `9223372036854775808 = 9223372036854775808`, out: true}, {in: `9223372036854775808 != 9223372036854775808`, out: false}, {in: `9223372036854775808 < 9223372036854775808`, out: false}, {in: `9223372036854775808 <= 9223372036854775808`, out: true}, {in: `9223372036854775808 > 9223372036854775808`, out: false}, {in: `9223372036854775808 >= 9223372036854775808`, out: true}, {in: `9223372036854775809 = 9223372036854775808`, out: false}, {in: `9223372036854775809 != 9223372036854775808`, out: true}, {in: `9223372036854775809 < 9223372036854775808`, out: false}, {in: `9223372036854775809 <= 9223372036854775808`, out: false}, {in: `9223372036854775809 > 9223372036854775808`, out: true}, {in: `9223372036854775809 >= 9223372036854775808`, out: true}, {in: `9223372036854775808 / 0`, out: uint64(0)}, {in: `9223372036854775808 + 1`, out: uint64(9223372036854775809)}, {in: `9223372036854775808 - 1`, out: uint64(9223372036854775807)}, {in: 
`9223372036854775809 - 9223372036854775808`, out: uint64(1)}, // Boolean literals. {in: `true AND false`, out: false}, {in: `true OR false`, out: true}, {in: `false = 4`, out: false}, // String literals. {in: `'foo' = 'bar'`, out: false}, {in: `'foo' = 'foo'`, out: true}, {in: `'' = 4`, out: false}, // Regex literals. {in: `'foo' =~ /f.*/`, out: true}, {in: `'foo' =~ /b.*/`, out: false}, {in: `'foo' !~ /f.*/`, out: false}, {in: `'foo' !~ /b.*/`, out: true}, // Variable references. {in: `foo`, out: "bar", data: map[string]interface{}{"foo": "bar"}}, {in: `foo = 'bar'`, out: true, data: map[string]interface{}{"foo": "bar"}}, {in: `foo = 'bar'`, out: false, data: map[string]interface{}{"foo": nil}}, {in: `'bar' = foo`, out: false, data: map[string]interface{}{"foo": nil}}, {in: `foo <> 'bar'`, out: true, data: map[string]interface{}{"foo": "xxx"}}, {in: `foo =~ /b.*/`, out: true, data: map[string]interface{}{"foo": "bar"}}, {in: `foo !~ /b.*/`, out: false, data: map[string]interface{}{"foo": "bar"}}, {in: `foo > 2 OR bar > 3`, out: true, data: map[string]interface{}{"foo": float64(4)}}, {in: `foo > 2 OR bar > 3`, out: true, data: map[string]interface{}{"bar": float64(4)}}, } { // Evaluate expression. out := influxql.Eval(MustParseExpr(tt.in), tt.data) // Compare with expected output. if !reflect.DeepEqual(tt.out, out) { t.Errorf("%d. 
%s: unexpected output:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.in, tt.out, out) continue } } } type EvalFixture map[string]map[string]influxql.DataType func (e EvalFixture) MapType(measurement *influxql.Measurement, field string) influxql.DataType { m := e[measurement.Name] if m == nil { return influxql.Unknown } return m[field] } func (e EvalFixture) CallType(name string, args []influxql.DataType) (influxql.DataType, error) { switch name { case "mean", "median", "integral", "stddev": return influxql.Float, nil case "count": return influxql.Integer, nil case "elapsed": return influxql.Integer, nil default: return args[0], nil } } func TestEvalType(t *testing.T) { for i, tt := range []struct { name string in string typ influxql.DataType err string data EvalFixture }{ { name: `a single data type`, in: `min(value)`, typ: influxql.Integer, data: EvalFixture{ "cpu": map[string]influxql.DataType{ "value": influxql.Integer, }, }, }, { name: `multiple data types`, in: `min(value)`, typ: influxql.Integer, data: EvalFixture{ "cpu": map[string]influxql.DataType{ "value": influxql.Integer, }, "mem": map[string]influxql.DataType{ "value": influxql.String, }, }, }, { name: `count() with a float`, in: `count(value)`, typ: influxql.Integer, data: EvalFixture{ "cpu": map[string]influxql.DataType{ "value": influxql.Float, }, }, }, { name: `mean() with an integer`, in: `mean(value)`, typ: influxql.Float, data: EvalFixture{ "cpu": map[string]influxql.DataType{ "value": influxql.Integer, }, }, }, { name: `stddev() with an integer`, in: `stddev(value)`, typ: influxql.Float, data: EvalFixture{ "cpu": map[string]influxql.DataType{ "value": influxql.Integer, }, }, }, { name: `value inside a parenthesis`, in: `(value)`, typ: influxql.Float, data: EvalFixture{ "cpu": map[string]influxql.DataType{ "value": influxql.Float, }, }, }, { name: `binary expression with a float and integer`, in: `v1 + v2`, typ: influxql.Float, data: EvalFixture{ "cpu": map[string]influxql.DataType{ "v1": influxql.Float, 
"v2": influxql.Integer, }, }, }, { name: `integer and unsigned literal`, in: `value + 9223372036854775808`, err: `type error: value + 9223372036854775808: cannot use + with an integer and unsigned literal`, data: EvalFixture{ "cpu": map[string]influxql.DataType{ "value": influxql.Integer, }, }, }, { name: `unsigned and integer literal`, in: `value + 1`, typ: influxql.Unsigned, data: EvalFixture{ "cpu": map[string]influxql.DataType{ "value": influxql.Unsigned, }, }, }, { name: `incompatible types`, in: `v1 + v2`, err: `type error: v1 + v2: incompatible types: string and integer`, data: EvalFixture{ "cpu": map[string]influxql.DataType{ "v1": influxql.String, "v2": influxql.Integer, }, }, }, } { sources := make([]influxql.Source, 0, len(tt.data)) for src := range tt.data { sources = append(sources, &influxql.Measurement{Name: src}) } expr := influxql.MustParseExpr(tt.in) valuer := influxql.TypeValuerEval{ TypeMapper: tt.data, Sources: sources, } typ, err := valuer.EvalType(expr) if err != nil { if exp, got := tt.err, err.Error(); exp != got { t.Errorf("%d. %s: unexpected error:\n\nexp=%#v\n\ngot=%v\n\n", i, tt.name, exp, got) } } else if typ != tt.typ { t.Errorf("%d. %s: unexpected type:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.name, tt.typ, typ) } } } // Ensure an expression can be reduced. func TestReduce(t *testing.T) { now := mustParseTime("2000-01-01T00:00:00Z") for i, tt := range []struct { in string out string data influxql.MapValuer }{ // Number literals. 
{in: `1 + 2`, out: `3`}, {in: `(foo*2) + ( (4/2) + (3 * 5) - 0.5 )`, out: `(foo * 2) + 16.500`}, {in: `foo(bar(2 + 3), 4)`, out: `foo(bar(5), 4)`}, {in: `4 / 0`, out: `0.000`}, {in: `1 / 2`, out: `0.500`}, {in: `2 % 3`, out: `2`}, {in: `5 % 2`, out: `1`}, {in: `2 % 0`, out: `0`}, {in: `2.5 % 0`, out: `NaN`}, {in: `254 & 3`, out: `2`}, {in: `254 | 3`, out: `255`}, {in: `254 ^ 3`, out: `253`}, {in: `-3 & 3`, out: `1`}, {in: `8 & -3`, out: `8`}, {in: `8.5 & -3`, out: `8.500 & -3`}, {in: `4 = 4`, out: `true`}, {in: `4 <> 4`, out: `false`}, {in: `6 > 4`, out: `true`}, {in: `4 >= 4`, out: `true`}, {in: `4 < 6`, out: `true`}, {in: `4 <= 4`, out: `true`}, {in: `4 AND 5`, out: `4 AND 5`}, {in: `-1 = 9223372036854775808`, out: `false`}, {in: `-1 != 9223372036854775808`, out: `true`}, {in: `-1 < 9223372036854775808`, out: `true`}, {in: `-1 <= 9223372036854775808`, out: `true`}, {in: `-1 > 9223372036854775808`, out: `false`}, {in: `-1 >= 9223372036854775808`, out: `false`}, {in: `9223372036854775808 = -1`, out: `false`}, {in: `9223372036854775808 != -1`, out: `true`}, {in: `9223372036854775808 < -1`, out: `false`}, {in: `9223372036854775808 <= -1`, out: `false`}, {in: `9223372036854775808 > -1`, out: `true`}, {in: `9223372036854775808 >= -1`, out: `true`}, {in: `9223372036854775808 = 9223372036854775808`, out: `true`}, {in: `9223372036854775808 != 9223372036854775808`, out: `false`}, {in: `9223372036854775808 < 9223372036854775808`, out: `false`}, {in: `9223372036854775808 <= 9223372036854775808`, out: `true`}, {in: `9223372036854775808 > 9223372036854775808`, out: `false`}, {in: `9223372036854775808 >= 9223372036854775808`, out: `true`}, {in: `9223372036854775809 = 9223372036854775808`, out: `false`}, {in: `9223372036854775809 != 9223372036854775808`, out: `true`}, {in: `9223372036854775809 < 9223372036854775808`, out: `false`}, {in: `9223372036854775809 <= 9223372036854775808`, out: `false`}, {in: `9223372036854775809 > 9223372036854775808`, out: `true`}, {in: 
`9223372036854775809 >= 9223372036854775808`, out: `true`}, {in: `9223372036854775808 / 0`, out: `0`}, {in: `9223372036854775808 + 1`, out: `9223372036854775809`}, {in: `9223372036854775808 - 1`, out: `9223372036854775807`}, {in: `9223372036854775809 - 9223372036854775808`, out: `1`}, // Boolean literals. {in: `true AND false`, out: `false`}, {in: `true OR false`, out: `true`}, {in: `true OR (foo = bar AND 1 > 2)`, out: `true`}, {in: `(foo = bar AND 1 > 2) OR true`, out: `true`}, {in: `false OR (foo = bar AND 1 > 2)`, out: `false`}, {in: `(foo = bar AND 1 > 2) OR false`, out: `false`}, {in: `true = false`, out: `false`}, {in: `true <> false`, out: `true`}, {in: `true + false`, out: `true + false`}, // Time literals with now(). {in: `now() + 2h`, out: `'2000-01-01T02:00:00Z'`}, {in: `now() / 2h`, out: `'2000-01-01T00:00:00Z' / 2h`}, {in: `4µ + now()`, out: `'2000-01-01T00:00:00.000004Z'`}, {in: `now() + 2000000000`, out: `'2000-01-01T00:00:02Z'`}, {in: `2000000000 + now()`, out: `'2000-01-01T00:00:02Z'`}, {in: `now() - 2000000000`, out: `'1999-12-31T23:59:58Z'`}, {in: `now() = now()`, out: `true`}, {in: `now() <> now()`, out: `false`}, {in: `now() < now() + 1h`, out: `true`}, {in: `now() <= now() + 1h`, out: `true`}, {in: `now() >= now() - 1h`, out: `true`}, {in: `now() > now() - 1h`, out: `true`}, {in: `now() - (now() - 60s)`, out: `1m`}, {in: `now() AND now()`, out: `'2000-01-01T00:00:00Z' AND '2000-01-01T00:00:00Z'`}, {in: `946684800000000000 + 2h`, out: `'2000-01-01T02:00:00Z'`}, // Time literals. 
{in: `'2000-01-01T00:00:00Z' + 2h`, out: `'2000-01-01T02:00:00Z'`}, {in: `'2000-01-01T00:00:00Z' / 2h`, out: `'2000-01-01T00:00:00Z' / 2h`}, {in: `4µ + '2000-01-01T00:00:00Z'`, out: `'2000-01-01T00:00:00.000004Z'`}, {in: `'2000-01-01T00:00:00Z' + 2000000000`, out: `'2000-01-01T00:00:02Z'`}, {in: `2000000000 + '2000-01-01T00:00:00Z'`, out: `'2000-01-01T00:00:02Z'`}, {in: `'2000-01-01T00:00:00Z' - 2000000000`, out: `'1999-12-31T23:59:58Z'`}, {in: `'2000-01-01T00:00:00Z' = '2000-01-01T00:00:00Z'`, out: `true`}, {in: `'2000-01-01T00:00:00.000000000Z' = '2000-01-01T00:00:00Z'`, out: `true`}, {in: `'2000-01-01T00:00:00Z' <> '2000-01-01T00:00:00Z'`, out: `false`}, {in: `'2000-01-01T00:00:00.000000000Z' <> '2000-01-01T00:00:00Z'`, out: `false`}, {in: `'2000-01-01T00:00:00Z' < '2000-01-01T00:00:00Z' + 1h`, out: `true`}, {in: `'2000-01-01T00:00:00.000000000Z' < '2000-01-01T00:00:00Z' + 1h`, out: `true`}, {in: `'2000-01-01T00:00:00Z' <= '2000-01-01T00:00:00Z' + 1h`, out: `true`}, {in: `'2000-01-01T00:00:00.000000000Z' <= '2000-01-01T00:00:00Z' + 1h`, out: `true`}, {in: `'2000-01-01T00:00:00Z' > '2000-01-01T00:00:00Z' - 1h`, out: `true`}, {in: `'2000-01-01T00:00:00.000000000Z' > '2000-01-01T00:00:00Z' - 1h`, out: `true`}, {in: `'2000-01-01T00:00:00Z' >= '2000-01-01T00:00:00Z' - 1h`, out: `true`}, {in: `'2000-01-01T00:00:00.000000000Z' >= '2000-01-01T00:00:00Z' - 1h`, out: `true`}, {in: `'2000-01-01T00:00:00Z' - ('2000-01-01T00:00:00Z' - 60s)`, out: `1m`}, {in: `'2000-01-01T00:00:00Z' AND '2000-01-01T00:00:00Z'`, out: `'2000-01-01T00:00:00Z' AND '2000-01-01T00:00:00Z'`}, // Duration literals. {in: `10m + 1h - 60s`, out: `69m`}, {in: `(10m / 2) * 5`, out: `25m`}, {in: `60s = 1m`, out: `true`}, {in: `60s <> 1m`, out: `false`}, {in: `60s < 1h`, out: `true`}, {in: `60s <= 1h`, out: `true`}, {in: `60s > 12s`, out: `true`}, {in: `60s >= 1m`, out: `true`}, {in: `60s AND 1m`, out: `1m AND 1m`}, {in: `60m / 0`, out: `0s`}, {in: `60m + 50`, out: `1h + 50`}, // String literals. 
{in: `'foo' + 'bar'`, out: `'foobar'`}, // Variable references. {in: `foo`, out: `'bar'`, data: map[string]interface{}{"foo": "bar"}}, {in: `foo = 'bar'`, out: `true`, data: map[string]interface{}{"foo": "bar"}}, {in: `foo = 'bar'`, out: `false`, data: map[string]interface{}{"foo": nil}}, {in: `foo <> 'bar'`, out: `false`, data: map[string]interface{}{"foo": nil}}, } { // Fold expression. expr := influxql.Reduce(MustParseExpr(tt.in), influxql.MultiValuer( tt.data, &influxql.NowValuer{Now: now}, )) // Compare with expected output. if out := expr.String(); tt.out != out { t.Errorf("%d. %s: unexpected expr:\n\nexp=%s\n\ngot=%s\n\n", i, tt.in, tt.out, out) continue } } } func Test_fieldsNames(t *testing.T) { for _, test := range []struct { in []string out []string alias []string }{ { //case: binary expr(valRef) in: []string{"value+value"}, out: []string{"value", "value"}, alias: []string{"value_value"}, }, { //case: binary expr + valRef in: []string{"value+value", "temperature"}, out: []string{"value", "value", "temperature"}, alias: []string{"value_value", "temperature"}, }, { //case: aggregate expr in: []string{"mean(value)"}, out: []string{"mean"}, alias: []string{"mean"}, }, { //case: binary expr(aggregate expr) in: []string{"mean(value) + max(value)"}, out: []string{"value", "value"}, alias: []string{"mean_max"}, }, { //case: binary expr(aggregate expr) + valRef in: []string{"mean(value) + max(value)", "temperature"}, out: []string{"value", "value", "temperature"}, alias: []string{"mean_max", "temperature"}, }, { //case: mixed aggregate and varRef in: []string{"mean(value) + temperature"}, out: []string{"value", "temperature"}, alias: []string{"mean_temperature"}, }, { //case: ParenExpr(varRef) in: []string{"(value)"}, out: []string{"value"}, alias: []string{"value"}, }, { //case: ParenExpr(varRef + varRef) in: []string{"(value + value)"}, out: []string{"value", "value"}, alias: []string{"value_value"}, }, { //case: ParenExpr(aggregate) in: 
[]string{"(mean(value))"}, out: []string{"value"}, alias: []string{"mean"}, }, { //case: ParenExpr(aggregate + aggregate) in: []string{"(mean(value) + max(value))"}, out: []string{"value", "value"}, alias: []string{"mean_max"}, }, } { fields := influxql.Fields{} for _, s := range test.in { expr := MustParseExpr(s) fields = append(fields, &influxql.Field{Expr: expr}) } got := fields.Names() if !reflect.DeepEqual(got, test.out) { t.Errorf("get fields name:\nexp=%v\ngot=%v\n", test.out, got) } alias := fields.AliasNames() if !reflect.DeepEqual(alias, test.alias) { t.Errorf("get fields alias name:\nexp=%v\ngot=%v\n", test.alias, alias) } } } func TestSelect_ColumnNames(t *testing.T) { for i, tt := range []struct { stmt *influxql.SelectStatement columns []string }{ { stmt: &influxql.SelectStatement{ Fields: influxql.Fields([]*influxql.Field{ {Expr: &influxql.VarRef{Val: "value"}}, }), }, columns: []string{"time", "value"}, }, { stmt: &influxql.SelectStatement{ Fields: influxql.Fields([]*influxql.Field{ {Expr: &influxql.VarRef{Val: "value"}}, {Expr: &influxql.VarRef{Val: "value"}}, {Expr: &influxql.VarRef{Val: "value_1"}}, }), }, columns: []string{"time", "value", "value_1", "value_1_1"}, }, { stmt: &influxql.SelectStatement{ Fields: influxql.Fields([]*influxql.Field{ {Expr: &influxql.VarRef{Val: "value"}}, {Expr: &influxql.VarRef{Val: "value_1"}}, {Expr: &influxql.VarRef{Val: "value"}}, }), }, columns: []string{"time", "value", "value_1", "value_2"}, }, { stmt: &influxql.SelectStatement{ Fields: influxql.Fields([]*influxql.Field{ {Expr: &influxql.VarRef{Val: "value"}}, {Expr: &influxql.VarRef{Val: "total"}, Alias: "value"}, {Expr: &influxql.VarRef{Val: "value"}}, }), }, columns: []string{"time", "value_1", "value", "value_2"}, }, { stmt: &influxql.SelectStatement{ Fields: influxql.Fields([]*influxql.Field{ {Expr: &influxql.VarRef{Val: "value"}}, }), TimeAlias: "timestamp", }, columns: []string{"timestamp", "value"}, }, } { columns := tt.stmt.ColumnNames() if 
!reflect.DeepEqual(columns, tt.columns) { t.Errorf("%d. expected %s, got %s", i, tt.columns, columns) } } } func TestSelect_Privileges(t *testing.T) { stmt := &influxql.SelectStatement{ Target: &influxql.Target{ Measurement: &influxql.Measurement{Database: "db2"}, }, Sources: []influxql.Source{ &influxql.Measurement{Database: "db0"}, &influxql.Measurement{Database: "db1"}, }, } exp := influxql.ExecutionPrivileges{ influxql.ExecutionPrivilege{Name: "db0", Privilege: influxql.ReadPrivilege}, influxql.ExecutionPrivilege{Name: "db1", Privilege: influxql.ReadPrivilege}, influxql.ExecutionPrivilege{Name: "db2", Privilege: influxql.WritePrivilege}, } got, err := stmt.RequiredPrivileges() if err != nil { t.Fatal(err) } if !reflect.DeepEqual(exp, got) { t.Errorf("exp: %v, got: %v", exp, got) } } func TestSelect_SubqueryPrivileges(t *testing.T) { stmt := &influxql.SelectStatement{ Target: &influxql.Target{ Measurement: &influxql.Measurement{Database: "db2"}, }, Sources: []influxql.Source{ &influxql.Measurement{Database: "db0"}, &influxql.SubQuery{ Statement: &influxql.SelectStatement{ Sources: []influxql.Source{ &influxql.Measurement{Database: "db1"}, }, }, }, }, } exp := influxql.ExecutionPrivileges{ influxql.ExecutionPrivilege{Name: "db0", Privilege: influxql.ReadPrivilege}, influxql.ExecutionPrivilege{Name: "db1", Privilege: influxql.ReadPrivilege}, influxql.ExecutionPrivilege{Name: "db2", Privilege: influxql.WritePrivilege}, } got, err := stmt.RequiredPrivileges() if err != nil { t.Fatal(err) } if !reflect.DeepEqual(exp, got) { t.Errorf("exp: %v, got: %v", exp, got) } } func TestShow_Privileges(t *testing.T) { for _, c := range []struct { stmt influxql.Statement exp influxql.ExecutionPrivileges }{ { stmt: &influxql.ShowDatabasesStatement{}, exp: influxql.ExecutionPrivileges{{Admin: false, Privilege: influxql.NoPrivileges}}, }, { stmt: &influxql.ShowFieldKeysStatement{}, exp: influxql.ExecutionPrivileges{{Admin: false, Privilege: influxql.ReadPrivilege}}, }, { stmt: 
&influxql.ShowMeasurementsStatement{}, exp: influxql.ExecutionPrivileges{{Admin: false, Privilege: influxql.ReadPrivilege}}, }, { stmt: &influxql.ShowQueriesStatement{}, exp: influxql.ExecutionPrivileges{{Admin: false, Privilege: influxql.ReadPrivilege}}, }, { stmt: &influxql.ShowRetentionPoliciesStatement{}, exp: influxql.ExecutionPrivileges{{Admin: false, Privilege: influxql.ReadPrivilege}}, }, { stmt: &influxql.ShowSeriesStatement{}, exp: influxql.ExecutionPrivileges{{Admin: false, Privilege: influxql.ReadPrivilege}}, }, { stmt: &influxql.ShowShardGroupsStatement{}, exp: influxql.ExecutionPrivileges{{Admin: true, Privilege: influxql.AllPrivileges}}, }, { stmt: &influxql.ShowShardsStatement{}, exp: influxql.ExecutionPrivileges{{Admin: true, Privilege: influxql.AllPrivileges}}, }, { stmt: &influxql.ShowStatsStatement{}, exp: influxql.ExecutionPrivileges{{Admin: true, Privilege: influxql.AllPrivileges}}, }, { stmt: &influxql.ShowSubscriptionsStatement{}, exp: influxql.ExecutionPrivileges{{Admin: true, Privilege: influxql.AllPrivileges}}, }, { stmt: &influxql.ShowDiagnosticsStatement{}, exp: influxql.ExecutionPrivileges{{Admin: true, Privilege: influxql.AllPrivileges}}, }, { stmt: &influxql.ShowTagKeysStatement{}, exp: influxql.ExecutionPrivileges{{Admin: false, Privilege: influxql.ReadPrivilege}}, }, { stmt: &influxql.ShowTagValuesStatement{}, exp: influxql.ExecutionPrivileges{{Admin: false, Privilege: influxql.ReadPrivilege}}, }, { stmt: &influxql.ShowUsersStatement{}, exp: influxql.ExecutionPrivileges{{Admin: true, Privilege: influxql.AllPrivileges}}, }, } { got, err := c.stmt.RequiredPrivileges() if err != nil { t.Fatal(err) } if !reflect.DeepEqual(c.exp, got) { t.Errorf("exp: %v, got: %v", c.exp, got) } } } func TestBoundParameter_String(t *testing.T) { stmt := &influxql.SelectStatement{ IsRawQuery: true, Fields: []*influxql.Field{{ Expr: &influxql.VarRef{Val: "value"}}}, Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, Condition: 
&influxql.BinaryExpr{ Op: influxql.GT, LHS: &influxql.VarRef{Val: "value"}, RHS: &influxql.BoundParameter{Name: "value"}, }, } if got, exp := stmt.String(), `SELECT value FROM cpu WHERE value > $value`; got != exp { t.Fatalf("stmt mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", exp, got) } stmt = &influxql.SelectStatement{ IsRawQuery: true, Fields: []*influxql.Field{{ Expr: &influxql.VarRef{Val: "value"}}}, Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, Condition: &influxql.BinaryExpr{ Op: influxql.GT, LHS: &influxql.VarRef{Val: "value"}, RHS: &influxql.BoundParameter{Name: "multi-word value"}, }, } if got, exp := stmt.String(), `SELECT value FROM cpu WHERE value > $"multi-word value"`; got != exp { t.Fatalf("stmt mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", exp, got) } } // This test checks to ensure that we have given thought to the database // context required for security checks. If a new statement is added, this // test will fail until it is categorized into the correct bucket below. 
func Test_EnforceHasDefaultDatabase(t *testing.T) { pkg, err := importer.Default().Import("github.com/influxdata/influxql") if err != nil { fmt.Printf("error: %s\n", err.Error()) return } statements := []string{} // this is a list of statements that do not have a database context exemptStatements := []string{ "CreateDatabaseStatement", "CreateUserStatement", "DeleteSeriesStatement", "DropDatabaseStatement", "DropMeasurementStatement", "DropSeriesStatement", "DropShardStatement", "DropUserStatement", "ExplainStatement", "GrantAdminStatement", "KillQueryStatement", "RevokeAdminStatement", "SelectStatement", "SetPasswordUserStatement", "ShowContinuousQueriesStatement", "ShowDatabasesStatement", "ShowDiagnosticsStatement", "ShowGrantsForUserStatement", "ShowQueriesStatement", "ShowShardGroupsStatement", "ShowShardsStatement", "ShowStatsStatement", "ShowSubscriptionsStatement", "ShowUsersStatement", } exists := func(stmt string) bool { switch stmt { // These are functions with the word statement in them, and can be ignored case "Statement", "MustParseStatement", "ParseStatement", "RewriteStatement": return true default: // check the exempt statements for _, s := range exemptStatements { if s == stmt { return true } } // check the statements that passed the interface test for HasDefaultDatabase for _, s := range statements { if s == stmt { return true } } return false } } needsHasDefault := []interface{}{ &influxql.AlterRetentionPolicyStatement{}, &influxql.CreateContinuousQueryStatement{}, &influxql.CreateRetentionPolicyStatement{}, &influxql.CreateSubscriptionStatement{}, &influxql.DeleteStatement{}, &influxql.DropContinuousQueryStatement{}, &influxql.DropRetentionPolicyStatement{}, &influxql.DropSubscriptionStatement{}, &influxql.GrantStatement{}, &influxql.RevokeStatement{}, &influxql.ShowFieldKeysStatement{}, &influxql.ShowFieldKeyCardinalityStatement{}, &influxql.ShowMeasurementCardinalityStatement{}, &influxql.ShowMeasurementsStatement{}, 
&influxql.ShowRetentionPoliciesStatement{}, &influxql.ShowSeriesStatement{}, &influxql.ShowSeriesCardinalityStatement{}, &influxql.ShowTagKeysStatement{}, &influxql.ShowTagKeyCardinalityStatement{}, &influxql.ShowTagValuesStatement{}, &influxql.ShowTagValuesCardinalityStatement{}, } for _, stmt := range needsHasDefault { statements = append(statements, strings.TrimPrefix(fmt.Sprintf("%T", stmt), "*influxql.")) if _, ok := stmt.(influxql.HasDefaultDatabase); !ok { t.Errorf("%T was expected to declare DefaultDatabase method", stmt) } } for _, declName := range pkg.Scope().Names() { if strings.HasSuffix(declName, "Statement") { if !exists(declName) { t.Errorf("unchecked statement %s. please update this test to determine if this statement needs to declare 'DefaultDatabase'", declName) } } } } // MustTimeRange will parse a time range. Panic on error. func MustTimeRange(expr influxql.Expr) (min, max time.Time) { _, timeRange, err := influxql.ConditionExpr(expr, nil) if err != nil { panic(err) } return timeRange.Min, timeRange.Max } // mustParseTime parses an IS0-8601 string. Panic on error. func mustParseTime(s string) time.Time { t, err := time.Parse(time.RFC3339, s) if err != nil { panic(err.Error()) } return t } // FieldMapper is a mockable implementation of influxql.FieldMapper. 
type FieldMapper struct { FieldDimensionsFn func(m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) } func (fm *FieldMapper) FieldDimensions(m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { return fm.FieldDimensionsFn(m) } func (fm *FieldMapper) MapType(m *influxql.Measurement, field string) influxql.DataType { f, d, err := fm.FieldDimensions(m) if err != nil { return influxql.Unknown } if typ, ok := f[field]; ok { return typ } if _, ok := d[field]; ok { return influxql.Tag } return influxql.Unknown } func (fm *FieldMapper) CallType(name string, args []influxql.DataType) (influxql.DataType, error) { switch name { case "mean", "median", "integral", "stddev": return influxql.Float, nil case "count": return influxql.Integer, nil case "elapsed": return influxql.Integer, nil default: return args[0], nil } } // BenchmarkExprNames benchmarks how long it takes to run ExprNames. func BenchmarkExprNames(b *testing.B) { exprs := make([]string, 100) for i := range exprs { exprs[i] = fmt.Sprintf("host = 'server%02d'", i) } condition := MustParseExpr(strings.Join(exprs, " OR ")) b.ResetTimer() b.ReportAllocs() for i := 0; i < b.N; i++ { refs := influxql.ExprNames(condition) if have, want := refs, []influxql.VarRef{{Val: "host"}}; !reflect.DeepEqual(have, want) { b.Fatalf("unexpected expression names: have=%s want=%s", have, want) } } } type FunctionValuer struct{} var _ influxql.CallValuer = FunctionValuer{} func (FunctionValuer) Value(key string) (interface{}, bool) { return nil, false } func (FunctionValuer) Call(name string, args []interface{}) (interface{}, bool) { switch name { case "abs": arg0 := args[0].(float64) return math.Abs(arg0), true case "pow": arg0, arg1 := args[0].(float64), args[1].(int64) return math.Pow(arg0, float64(arg1)), true default: return nil, false } } // BenchmarkEval benchmarks how long it takes to run Eval. 
func BenchmarkEval(b *testing.B) { expr := MustParseExpr(`f1 + abs(f2) / pow(f3, 3)`) valuer := influxql.ValuerEval{ Valuer: influxql.MultiValuer( influxql.MapValuer(map[string]interface{}{ "f1": float64(15), "f2": float64(-3), "f3": float64(2), }), FunctionValuer{}, ), } b.ReportAllocs() for i := 0; i < b.N; i++ { valuer.Eval(expr) } } influxql-1.1.0/doc.go000066400000000000000000000005741363177076200144710ustar00rootroot00000000000000/* Package influxql implements a parser for the InfluxDB query language. InfluxQL is a DML and DDL language for the InfluxDB time series database. It provides the ability to query for aggregate statistics as well as create and configure the InfluxDB server. See https://docs.influxdata.com/influxdb/latest/query_language/ for a reference on using InfluxQL. */ package influxql influxql-1.1.0/influxql.go000066400000000000000000000001671363177076200155640ustar00rootroot00000000000000package influxql // import "github.com/influxdata/influxql" //go:generate protoc --gogo_out=. internal/internal.proto influxql-1.1.0/internal/000077500000000000000000000000001363177076200152035ustar00rootroot00000000000000influxql-1.1.0/internal/internal.pb.go000066400000000000000000000103601363177076200177460ustar00rootroot00000000000000// Code generated by protoc-gen-gogo. // source: internal/internal.proto // DO NOT EDIT! /* Package influxql is a generated protocol buffer package. It is generated from these files: internal/internal.proto It has these top-level messages: Measurements Measurement */ package influxql import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. 
// A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package type Measurements struct { Items []*Measurement `protobuf:"bytes,1,rep,name=Items" json:"Items,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Measurements) Reset() { *m = Measurements{} } func (m *Measurements) String() string { return proto.CompactTextString(m) } func (*Measurements) ProtoMessage() {} func (*Measurements) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{0} } func (m *Measurements) GetItems() []*Measurement { if m != nil { return m.Items } return nil } type Measurement struct { Database *string `protobuf:"bytes,1,opt,name=Database" json:"Database,omitempty"` RetentionPolicy *string `protobuf:"bytes,2,opt,name=RetentionPolicy" json:"RetentionPolicy,omitempty"` Name *string `protobuf:"bytes,3,opt,name=Name" json:"Name,omitempty"` Regex *string `protobuf:"bytes,4,opt,name=Regex" json:"Regex,omitempty"` IsTarget *bool `protobuf:"varint,5,opt,name=IsTarget" json:"IsTarget,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Measurement) Reset() { *m = Measurement{} } func (m *Measurement) String() string { return proto.CompactTextString(m) } func (*Measurement) ProtoMessage() {} func (*Measurement) Descriptor() ([]byte, []int) { return fileDescriptorInternal, []int{1} } func (m *Measurement) GetDatabase() string { if m != nil && m.Database != nil { return *m.Database } return "" } func (m *Measurement) GetRetentionPolicy() string { if m != nil && m.RetentionPolicy != nil { return *m.RetentionPolicy } return "" } func (m *Measurement) GetName() string { if m != nil && m.Name != nil { return *m.Name } return "" } func (m *Measurement) GetRegex() string { if m != nil && m.Regex != nil { return *m.Regex } return "" } func (m *Measurement) GetIsTarget() bool { if m != nil && m.IsTarget != nil { return *m.IsTarget } return false } func init() { 
proto.RegisterType((*Measurements)(nil), "influxql.Measurements") proto.RegisterType((*Measurement)(nil), "influxql.Measurement") } func init() { proto.RegisterFile("internal/internal.proto", fileDescriptorInternal) } var fileDescriptorInternal = []byte{ // 195 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xcf, 0xcc, 0x2b, 0x49, 0x2d, 0xca, 0x4b, 0xcc, 0xd1, 0x87, 0x31, 0xf4, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85, 0x38, 0x32, 0xf3, 0xd2, 0x72, 0x4a, 0x2b, 0x0a, 0x73, 0x94, 0xac, 0xb9, 0x78, 0x7c, 0x53, 0x13, 0x8b, 0x4b, 0x8b, 0x52, 0x73, 0x53, 0xf3, 0x4a, 0x8a, 0x85, 0xb4, 0xb9, 0x58, 0x3d, 0x4b, 0x52, 0x73, 0x8b, 0x25, 0x18, 0x15, 0x98, 0x35, 0xb8, 0x8d, 0x44, 0xf5, 0x60, 0x2a, 0xf5, 0x90, 0x94, 0x05, 0x41, 0xd4, 0x28, 0xcd, 0x64, 0xe4, 0xe2, 0x46, 0x12, 0x16, 0x92, 0xe2, 0xe2, 0x70, 0x49, 0x2c, 0x49, 0x4c, 0x4a, 0x2c, 0x4e, 0x95, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0c, 0x82, 0xf3, 0x85, 0x34, 0xb8, 0xf8, 0x83, 0x52, 0x4b, 0x52, 0xf3, 0x4a, 0x32, 0xf3, 0xf3, 0x02, 0xf2, 0x73, 0x32, 0x93, 0x2b, 0x25, 0x98, 0xc0, 0x4a, 0xd0, 0x85, 0x85, 0x84, 0xb8, 0x58, 0xfc, 0x12, 0x73, 0x53, 0x25, 0x98, 0xc1, 0xd2, 0x60, 0xb6, 0x90, 0x08, 0x17, 0x6b, 0x50, 0x6a, 0x7a, 0x6a, 0x85, 0x04, 0x0b, 0x58, 0x10, 0xc2, 0x01, 0xd9, 0xe7, 0x59, 0x1c, 0x92, 0x58, 0x94, 0x9e, 0x5a, 0x22, 0xc1, 0xaa, 0xc0, 0xa8, 0xc1, 0x11, 0x04, 0xe7, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x16, 0x06, 0x23, 0xfc, 0x00, 0x00, 0x00, } influxql-1.1.0/internal/internal.proto000066400000000000000000000005111363177076200201010ustar00rootroot00000000000000syntax = "proto2"; package influxql; message Measurements { repeated Measurement Items = 1; } message Measurement { optional string Database = 1; optional string RetentionPolicy = 2; optional string Name = 3; optional string Regex = 4; optional bool IsTarget = 5; } influxql-1.1.0/params.go000066400000000000000000000103131363177076200151770ustar00rootroot00000000000000package influxql import ( 
"encoding/json" "fmt" "strconv" "strings" "time" ) // Value represents a value that can be bound // to a parameter when parsing the query. type Value interface { TokenType() Token Value() string } type ( // Identifier is an identifier value. Identifier string // StringValue is a string literal. StringValue string // RegexValue is a regexp literal. RegexValue string // NumberValue is a number literal. NumberValue float64 // IntegerValue is an integer literal. IntegerValue int64 // BooleanValue is a boolean literal. BooleanValue bool // DurationValue is a duration literal. DurationValue string // ErrorValue is a special value that returns an error during parsing // when it is used. ErrorValue string ) // BindValue will bind an interface value to its influxql value. // This method of binding values only supports literals. func BindValue(v interface{}) Value { if jv, ok := v.(json.Number); ok { var err error v, err = jsonNumberToValue(jv) if err != nil { return ErrorValue(err.Error()) } } switch v := v.(type) { case float64: return NumberValue(v) case int64: return IntegerValue(v) case string: return StringValue(v) case bool: return BooleanValue(v) case map[string]interface{}: return bindObjectValue(v) default: s := fmt.Sprintf("unable to bind parameter with type %T", v) return ErrorValue(s) } } // bindObjectValue will bind an object to a value. func bindObjectValue(m map[string]interface{}) Value { if len(m) != 1 { return ErrorValue("bound object parameter value must have exactly one entry") } var ( k string v interface{} ) for k, v = range m { // Nothing done here. 
} if jv, ok := v.(json.Number); ok { var err error v, err = jsonNumberToValue(jv) if err != nil { return ErrorValue(err.Error()) } } switch k { case "ident", "identifier": s, ok := v.(string) if !ok { return ErrorValue("identifier must be a string value") } return Identifier(s) case "regex": s, ok := v.(string) if !ok { return ErrorValue("regex literal must be a string value") } return RegexValue(s) case "string": s, ok := v.(string) if !ok { return ErrorValue("string literal must be a string value") } return StringValue(s) case "float", "number": switch f := v.(type) { case float64: return NumberValue(f) case int64: return NumberValue(f) default: return ErrorValue("number literal must be a float value") } case "int", "integer": i, ok := v.(int64) if !ok { return ErrorValue("integer literal must be an integer value") } return IntegerValue(i) case "duration": switch d := v.(type) { case string: return DurationValue(d) case int64: return DurationValue(FormatDuration(time.Duration(d))) default: return ErrorValue("duration literal must be a string or integer value") } default: return ErrorValue(fmt.Sprintf("unknown bind object type: %s", k)) } } func (v Identifier) TokenType() Token { return IDENT } func (v Identifier) Value() string { return string(v) } func (v StringValue) TokenType() Token { return STRING } func (v StringValue) Value() string { return string(v) } func (v RegexValue) TokenType() Token { return REGEX } func (v RegexValue) Value() string { return string(v) } func (v NumberValue) TokenType() Token { return NUMBER } func (v NumberValue) Value() string { return strconv.FormatFloat(float64(v), 'f', -1, 64) } func (v IntegerValue) TokenType() Token { return INTEGER } func (v IntegerValue) Value() string { return strconv.FormatInt(int64(v), 10) } func (v BooleanValue) TokenType() Token { if v { return TRUE } else { return FALSE } } func (v BooleanValue) Value() string { return "" } func (v DurationValue) TokenType() Token { return DURATIONVAL } func (v 
DurationValue) Value() string { return string(v) } func (e ErrorValue) TokenType() Token { return BOUNDPARAM } func (e ErrorValue) Value() string { return string(e) } func jsonNumberToValue(v json.Number) (interface{}, error) { if strings.Contains(string(v), ".") { f, err := v.Float64() if err != nil { return nil, err } return f, nil } else { i, err := v.Int64() if err != nil { return nil, err } return i, nil } } influxql-1.1.0/parse_tree.go000066400000000000000000000166311363177076200160560ustar00rootroot00000000000000package influxql import ( "fmt" ) var Language = &ParseTree{} type ParseTree struct { Handlers map[Token]func(*Parser) (Statement, error) Tokens map[Token]*ParseTree Keys []string } // With passes the current parse tree to a function to allow nested functions. func (t *ParseTree) With(fn func(*ParseTree)) { fn(t) } // Group groups together a set of related handlers with a common token prefix. func (t *ParseTree) Group(tokens ...Token) *ParseTree { for _, tok := range tokens { // Look for the parse tree for this token. if subtree := t.Tokens[tok]; subtree != nil { t = subtree continue } // No subtree exists yet. Verify that we don't have a conflicting // statement. if _, conflict := t.Handlers[tok]; conflict { panic(fmt.Sprintf("conflict for token %s", tok)) } // Create the new parse tree and register it inside of this one for // later reference. newT := &ParseTree{} if t.Tokens == nil { t.Tokens = make(map[Token]*ParseTree) } t.Tokens[tok] = newT t.Keys = append(t.Keys, tok.String()) t = newT } return t } // Handle registers a handler to be invoked when seeing the given token. func (t *ParseTree) Handle(tok Token, fn func(*Parser) (Statement, error)) { // Verify that there is no conflict for this token in this parse tree. 
if _, conflict := t.Tokens[tok]; conflict { panic(fmt.Sprintf("conflict for token %s", tok)) } if _, conflict := t.Handlers[tok]; conflict { panic(fmt.Sprintf("conflict for token %s", tok)) } if t.Handlers == nil { t.Handlers = make(map[Token]func(*Parser) (Statement, error)) } t.Handlers[tok] = fn t.Keys = append(t.Keys, tok.String()) } // Parse parses a statement using the language defined in the parse tree. func (t *ParseTree) Parse(p *Parser) (Statement, error) { for { tok, pos, lit := p.ScanIgnoreWhitespace() if subtree := t.Tokens[tok]; subtree != nil { t = subtree continue } if stmt := t.Handlers[tok]; stmt != nil { return stmt(p) } // There were no registered handlers. Return the valid tokens in the order they were added. return nil, newParseError(tokstr(tok, lit), t.Keys, pos) } } func (t *ParseTree) Clone() *ParseTree { newT := &ParseTree{} if t.Handlers != nil { newT.Handlers = make(map[Token]func(*Parser) (Statement, error), len(t.Handlers)) for tok, handler := range t.Handlers { newT.Handlers[tok] = handler } } if t.Tokens != nil { newT.Tokens = make(map[Token]*ParseTree, len(t.Tokens)) for tok, subtree := range t.Tokens { newT.Tokens[tok] = subtree.Clone() } } return newT } func init() { Language.Handle(SELECT, func(p *Parser) (Statement, error) { return p.parseSelectStatement(targetNotRequired) }) Language.Handle(DELETE, func(p *Parser) (Statement, error) { return p.parseDeleteStatement() }) Language.Group(SHOW).With(func(show *ParseTree) { show.Group(CONTINUOUS).Handle(QUERIES, func(p *Parser) (Statement, error) { return p.parseShowContinuousQueriesStatement() }) show.Handle(DATABASES, func(p *Parser) (Statement, error) { return p.parseShowDatabasesStatement() }) show.Handle(DIAGNOSTICS, func(p *Parser) (Statement, error) { return p.parseShowDiagnosticsStatement() }) show.Group(FIELD).With(func(field *ParseTree) { field.Handle(KEY, func(p *Parser) (Statement, error) { return p.parseShowFieldKeyCardinalityStatement() }) field.Handle(KEYS, func(p 
*Parser) (Statement, error) { return p.parseShowFieldKeysStatement() }) }) show.Group(GRANTS).Handle(FOR, func(p *Parser) (Statement, error) { return p.parseGrantsForUserStatement() }) show.Group(MEASUREMENT).Handle(EXACT, func(p *Parser) (Statement, error) { return p.parseShowMeasurementCardinalityStatement(true) }) show.Group(MEASUREMENT).Handle(CARDINALITY, func(p *Parser) (Statement, error) { return p.parseShowMeasurementCardinalityStatement(false) }) show.Handle(MEASUREMENTS, func(p *Parser) (Statement, error) { return p.parseShowMeasurementsStatement() }) show.Handle(QUERIES, func(p *Parser) (Statement, error) { return p.parseShowQueriesStatement() }) show.Group(RETENTION).Handle(POLICIES, func(p *Parser) (Statement, error) { return p.parseShowRetentionPoliciesStatement() }) show.Handle(SERIES, func(p *Parser) (Statement, error) { return p.parseShowSeriesStatement() }) show.Group(SHARD).Handle(GROUPS, func(p *Parser) (Statement, error) { return p.parseShowShardGroupsStatement() }) show.Handle(SHARDS, func(p *Parser) (Statement, error) { return p.parseShowShardsStatement() }) show.Handle(STATS, func(p *Parser) (Statement, error) { return p.parseShowStatsStatement() }) show.Handle(SUBSCRIPTIONS, func(p *Parser) (Statement, error) { return p.parseShowSubscriptionsStatement() }) show.Group(TAG).With(func(tag *ParseTree) { tag.Handle(KEY, func(p *Parser) (Statement, error) { return p.parseShowTagKeyCardinalityStatement() }) tag.Handle(KEYS, func(p *Parser) (Statement, error) { return p.parseShowTagKeysStatement() }) tag.Handle(VALUES, func(p *Parser) (Statement, error) { return p.parseShowTagValuesStatement() }) }) show.Handle(USERS, func(p *Parser) (Statement, error) { return p.parseShowUsersStatement() }) }) Language.Group(CREATE).With(func(create *ParseTree) { create.Group(CONTINUOUS).Handle(QUERY, func(p *Parser) (Statement, error) { return p.parseCreateContinuousQueryStatement() }) create.Handle(DATABASE, func(p *Parser) (Statement, error) { return 
p.parseCreateDatabaseStatement() }) create.Handle(USER, func(p *Parser) (Statement, error) { return p.parseCreateUserStatement() }) create.Group(RETENTION).Handle(POLICY, func(p *Parser) (Statement, error) { return p.parseCreateRetentionPolicyStatement() }) create.Handle(SUBSCRIPTION, func(p *Parser) (Statement, error) { return p.parseCreateSubscriptionStatement() }) }) Language.Group(DROP).With(func(drop *ParseTree) { drop.Group(CONTINUOUS).Handle(QUERY, func(p *Parser) (Statement, error) { return p.parseDropContinuousQueryStatement() }) drop.Handle(DATABASE, func(p *Parser) (Statement, error) { return p.parseDropDatabaseStatement() }) drop.Handle(MEASUREMENT, func(p *Parser) (Statement, error) { return p.parseDropMeasurementStatement() }) drop.Group(RETENTION).Handle(POLICY, func(p *Parser) (Statement, error) { return p.parseDropRetentionPolicyStatement() }) drop.Handle(SERIES, func(p *Parser) (Statement, error) { return p.parseDropSeriesStatement() }) drop.Handle(SHARD, func(p *Parser) (Statement, error) { return p.parseDropShardStatement() }) drop.Handle(SUBSCRIPTION, func(p *Parser) (Statement, error) { return p.parseDropSubscriptionStatement() }) drop.Handle(USER, func(p *Parser) (Statement, error) { return p.parseDropUserStatement() }) }) Language.Handle(EXPLAIN, func(p *Parser) (Statement, error) { return p.parseExplainStatement() }) Language.Handle(GRANT, func(p *Parser) (Statement, error) { return p.parseGrantStatement() }) Language.Handle(REVOKE, func(p *Parser) (Statement, error) { return p.parseRevokeStatement() }) Language.Group(ALTER, RETENTION).Handle(POLICY, func(p *Parser) (Statement, error) { return p.parseAlterRetentionPolicyStatement() }) Language.Group(SET, PASSWORD).Handle(FOR, func(p *Parser) (Statement, error) { return p.parseSetPasswordUserStatement() }) Language.Group(KILL).Handle(QUERY, func(p *Parser) (Statement, error) { return p.parseKillQueryStatement() }) } 
influxql-1.1.0/parse_tree_test.go000066400000000000000000000017611363177076200171130ustar00rootroot00000000000000package influxql_test import ( "reflect" "strings" "testing" "github.com/influxdata/influxql" ) func TestParseTree_Clone(t *testing.T) { // Clone the default language parse tree and add a new syntax node. language := influxql.Language.Clone() language.Group(influxql.CREATE).Handle(influxql.STATS, func(p *influxql.Parser) (influxql.Statement, error) { return &influxql.ShowStatsStatement{}, nil }) // Create a parser with CREATE STATS and parse the statement. parser := influxql.NewParser(strings.NewReader(`CREATE STATS`)) stmt, err := language.Parse(parser) if err != nil { t.Fatalf("unexpected error: %s", err) } else if !reflect.DeepEqual(stmt, &influxql.ShowStatsStatement{}) { t.Fatalf("unexpected statement returned from parser: %s", stmt) } // Recreate the parser and try parsing with the original parsing. This should fail. parser = influxql.NewParser(strings.NewReader(`CREATE STATS`)) if _, err := parser.ParseStatement(); err == nil { t.Fatal("expected error") } } influxql-1.1.0/parser.go000066400000000000000000002432721363177076200152240ustar00rootroot00000000000000package influxql import ( "bytes" "errors" "fmt" "io" "math" "regexp" "strconv" "strings" "time" ) const ( // DateFormat represents the format for date literals. DateFormat = "2006-01-02" // DateTimeFormat represents the format for date time literals. DateTimeFormat = "2006-01-02 15:04:05.999999" ) // Parser represents an InfluxQL parser. type Parser struct { s *bufScanner params map[string]Value } // NewParser returns a new instance of Parser. func NewParser(r io.Reader) *Parser { return &Parser{s: newBufScanner(r)} } // SetParams sets the parameters that will be used for any bound parameter substitutions. 
func (p *Parser) SetParams(params map[string]interface{}) { p.params = make(map[string]Value, len(params)) for name, param := range params { p.params[name] = BindValue(param) } } // ParseQuery parses a query string and returns its AST representation. func ParseQuery(s string) (*Query, error) { return NewParser(strings.NewReader(s)).ParseQuery() } // ParseStatement parses a statement string and returns its AST representation. func ParseStatement(s string) (Statement, error) { return NewParser(strings.NewReader(s)).ParseStatement() } // MustParseStatement parses a statement string and returns its AST. Panic on error. func MustParseStatement(s string) Statement { stmt, err := ParseStatement(s) if err != nil { panic(err.Error()) } return stmt } // ParseExpr parses an expression string and returns its AST representation. func ParseExpr(s string) (Expr, error) { return NewParser(strings.NewReader(s)).ParseExpr() } // MustParseExpr parses an expression string and returns its AST. Panic on error. func MustParseExpr(s string) Expr { expr, err := ParseExpr(s) if err != nil { panic(err.Error()) } return expr } // ParseQuery parses an InfluxQL string and returns a Query AST object. func (p *Parser) ParseQuery() (*Query, error) { var statements Statements semi := true for { if tok, pos, lit := p.ScanIgnoreWhitespace(); tok == EOF { return &Query{Statements: statements}, nil } else if tok == SEMICOLON { semi = true } else { if !semi { return nil, newParseError(tokstr(tok, lit), []string{";"}, pos) } p.Unscan() s, err := p.ParseStatement() if err != nil { return nil, err } statements = append(statements, s) semi = false } } } // ParseStatement parses an InfluxQL string and returns a Statement AST object. func (p *Parser) ParseStatement() (Statement, error) { return Language.Parse(p) } // parseSetPasswordUserStatement parses a string and returns a set statement. // This function assumes the SET token has already been consumed. 
func (p *Parser) parseSetPasswordUserStatement() (*SetPasswordUserStatement, error) {
	stmt := &SetPasswordUserStatement{}

	// Parse username.
	ident, err := p.ParseIdent()
	if err != nil {
		return nil, err
	}
	stmt.Name = ident

	// Consume the required = token.
	if tok, pos, lit := p.ScanIgnoreWhitespace(); tok != EQ {
		return nil, newParseError(tokstr(tok, lit), []string{"="}, pos)
	}

	// Parse the new user's password (must be a string literal).
	if ident, err = p.parseString(); err != nil {
		return nil, err
	}
	stmt.Password = ident

	return stmt, nil
}

// parseKillQueryStatement parses a string and returns a kill statement.
// This function assumes the KILL token has already been consumed.
func (p *Parser) parseKillQueryStatement() (*KillQueryStatement, error) {
	// Parse the numeric ID of the query to kill.
	qid, err := p.ParseUInt64()
	if err != nil {
		return nil, err
	}

	// An optional "ON <host>" clause restricts the kill to a single host;
	// if the next token is not ON it is pushed back for the caller.
	var host string
	if tok, _, _ := p.ScanIgnoreWhitespace(); tok == ON {
		host, err = p.ParseIdent()
		if err != nil {
			return nil, err
		}
	} else {
		p.Unscan()
	}
	return &KillQueryStatement{QueryID: qid, Host: host}, nil
}

// parseCreateSubscriptionStatement parses a string and returns a CreateSubscriptionStatement.
// This function assumes the "CREATE SUBSCRIPTION" tokens have already been consumed.
func (p *Parser) parseCreateSubscriptionStatement() (*CreateSubscriptionStatement, error) {
	stmt := &CreateSubscriptionStatement{}

	// Read the id of the subscription to create.
	ident, err := p.ParseIdent()
	if err != nil {
		return nil, err
	}
	stmt.Name = ident

	// Expect an "ON" keyword.
	if tok, pos, lit := p.ScanIgnoreWhitespace(); tok != ON {
		return nil, newParseError(tokstr(tok, lit), []string{"ON"}, pos)
	}

	// Read the name of the database.
	if ident, err = p.ParseIdent(); err != nil {
		return nil, err
	}
	stmt.Database = ident

	// NOTE(review): Scan (not ScanIgnoreWhitespace) is used here, so the dot
	// appears to be required immediately after the database name — confirm.
	if tok, pos, lit := p.Scan(); tok != DOT {
		return nil, newParseError(tokstr(tok, lit), []string{"."}, pos)
	}

	// Read the name of the retention policy.
	if ident, err = p.ParseIdent(); err != nil {
		return nil, err
	}
	stmt.RetentionPolicy = ident

	// Expect a "DESTINATIONS" keyword.
	if tok, pos, lit := p.ScanIgnoreWhitespace(); tok != DESTINATIONS {
		return nil, newParseError(tokstr(tok, lit), []string{"DESTINATIONS"}, pos)
	}

	// Expect one of the "ANY" or "ALL" keywords; its text form becomes the mode.
	if tok, pos, lit := p.ScanIgnoreWhitespace(); tok == ALL || tok == ANY {
		stmt.Mode = tokens[tok]
	} else {
		return nil, newParseError(tokstr(tok, lit), []string{"ALL", "ANY"}, pos)
	}

	// Read list of destinations.
	var destinations []string
	if destinations, err = p.parseStringList(); err != nil {
		return nil, err
	}
	stmt.Destinations = destinations

	return stmt, nil
}

// parseCreateRetentionPolicyStatement parses a string and returns a create retention policy statement.
// This function assumes the CREATE RETENTION POLICY tokens have already been consumed.
func (p *Parser) parseCreateRetentionPolicyStatement() (*CreateRetentionPolicyStatement, error) {
	stmt := &CreateRetentionPolicyStatement{}

	// Parse the retention policy name.
	ident, err := p.ParseIdent()
	if err != nil {
		return nil, err
	}
	stmt.Name = ident

	// Consume the required ON token.
	if tok, pos, lit := p.ScanIgnoreWhitespace(); tok != ON {
		return nil, newParseError(tokstr(tok, lit), []string{"ON"}, pos)
	}

	// Parse the database name.
	ident, err = p.ParseIdent()
	if err != nil {
		return nil, err
	}
	stmt.Database = ident

	// Parse required DURATION token.
	if tok, pos, lit := p.ScanIgnoreWhitespace(); tok != DURATION {
		return nil, newParseError(tokstr(tok, lit), []string{"DURATION"}, pos)
	}

	// Parse duration value.
	d, err := p.ParseDuration()
	if err != nil {
		return nil, err
	}
	stmt.Duration = d

	// Parse required REPLICATION token.
	if tok, pos, lit := p.ScanIgnoreWhitespace(); tok != REPLICATION {
		return nil, newParseError(tokstr(tok, lit), []string{"REPLICATION"}, pos)
	}

	// Parse replication value; must be a positive 32-bit integer.
	n, err := p.ParseInt(1, math.MaxInt32)
	if err != nil {
		return nil, err
	}
	stmt.Replication = n

	// Parse optional SHARD token.
	if tok, _, _ := p.ScanIgnoreWhitespace(); tok == SHARD {
		if tok, pos, lit := p.ScanIgnoreWhitespace(); tok != DURATION {
			return nil, newParseError(tokstr(tok, lit), []string{"DURATION"}, pos)
		}

		// Check to see if they used the INF keyword; INF is not a valid
		// shard group duration, so reject it explicitly here.
		tok, pos, _ := p.ScanIgnoreWhitespace()
		if tok == INF {
			return nil, &ParseError{
				Message: "invalid duration INF for shard duration",
				Pos:     pos,
			}
		}
		p.Unscan()

		d, err := p.ParseDuration()
		if err != nil {
			return nil, err
		}
		stmt.ShardGroupDuration = d
	} else {
		p.Unscan()
	}

	// Parse optional DEFAULT token.
	if tok, _, _ := p.ScanIgnoreWhitespace(); tok == DEFAULT {
		stmt.Default = true
	} else {
		p.Unscan()
	}

	return stmt, nil
}

// parseAlterRetentionPolicyStatement parses a string and returns an alter retention policy statement.
// This function assumes the ALTER RETENTION POLICY tokens have already been consumed.
func (p *Parser) parseAlterRetentionPolicyStatement() (*AlterRetentionPolicyStatement, error) {
	stmt := &AlterRetentionPolicyStatement{}

	// Parse the retention policy name. The DEFAULT keyword is accepted here
	// as the literal policy name "default".
	tok, pos, lit := p.ScanIgnoreWhitespace()
	if tok == DEFAULT {
		stmt.Name = "default"
	} else if tok == IDENT {
		stmt.Name = lit
	} else {
		return nil, newParseError(tokstr(tok, lit), []string{"identifier"}, pos)
	}

	// Consume the required ON token.
	if tok, pos, lit = p.ScanIgnoreWhitespace(); tok != ON {
		return nil, newParseError(tokstr(tok, lit), []string{"ON"}, pos)
	}

	// Parse the database name.
	ident, err := p.ParseIdent()
	if err != nil {
		return nil, err
	}
	stmt.Database = ident

	// Loop through option tokens (DURATION, REPLICATION, SHARD DURATION, DEFAULT, etc.).
	// The `found` set rejects the same option appearing twice.
	found := make(map[Token]struct{})
Loop:
	for {
		tok, pos, lit := p.ScanIgnoreWhitespace()
		if _, ok := found[tok]; ok {
			return nil, &ParseError{
				Message: fmt.Sprintf("found duplicate %s option", tok),
				Pos:     pos,
			}
		}

		switch tok {
		case DURATION:
			d, err := p.ParseDuration()
			if err != nil {
				return nil, err
			}
			stmt.Duration = &d
		case REPLICATION:
			n, err := p.ParseInt(1, math.MaxInt32)
			if err != nil {
				return nil, err
			}
			stmt.Replication = &n
		case SHARD:
			tok, pos, lit := p.ScanIgnoreWhitespace()
			if tok == DURATION {
				// Check to see if they used the INF keyword; INF is not a
				// valid shard group duration.
				tok, pos, _ := p.ScanIgnoreWhitespace()
				if tok == INF {
					return nil, &ParseError{
						Message: "invalid duration INF for shard duration",
						Pos:     pos,
					}
				}
				p.Unscan()

				d, err := p.ParseDuration()
				if err != nil {
					return nil, err
				}
				stmt.ShardGroupDuration = &d
			} else {
				return nil, newParseError(tokstr(tok, lit), []string{"DURATION"}, pos)
			}
		case DEFAULT:
			stmt.Default = true
		default:
			// At least one option is required; otherwise push the token back
			// and stop consuming options.
			if len(found) == 0 {
				return nil, newParseError(tokstr(tok, lit), []string{"DURATION", "REPLICATION", "SHARD", "DEFAULT"}, pos)
			}
			p.Unscan()
			break Loop
		}
		found[tok] = struct{}{}
	}

	return stmt, nil
}

// ParseInt parses a string representing a base 10 integer and returns the number.
// It returns an error if the parsed number is outside the range [min, max].
func (p *Parser) ParseInt(min, max int) (int, error) {
	tok, pos, lit := p.ScanIgnoreWhitespace()
	if tok != INTEGER {
		return 0, newParseError(tokstr(tok, lit), []string{"integer"}, pos)
	}

	// Convert string to int.
	n, err := strconv.Atoi(lit)
	if err != nil {
		return 0, &ParseError{Message: err.Error(), Pos: pos}
	} else if min > n || n > max {
		return 0, &ParseError{
			Message: fmt.Sprintf("invalid value %d: must be %d <= n <= %d", n, min, max),
			Pos:     pos,
		}
	}

	return n, nil
}

// ParseUInt64 parses a string and returns a 64-bit unsigned integer literal.
func (p *Parser) ParseUInt64() (uint64, error) { tok, pos, lit := p.ScanIgnoreWhitespace() if tok != INTEGER { return 0, newParseError(tokstr(tok, lit), []string{"integer"}, pos) } // Convert string to unsigned 64-bit integer n, err := strconv.ParseUint(lit, 10, 64) if err != nil { return 0, &ParseError{Message: err.Error(), Pos: pos} } return uint64(n), nil } // ParseDuration parses a string and returns a duration literal. // This function assumes the DURATION token has already been consumed. func (p *Parser) ParseDuration() (time.Duration, error) { tok, pos, lit := p.ScanIgnoreWhitespace() if tok != DURATIONVAL && tok != INF { return 0, newParseError(tokstr(tok, lit), []string{"duration"}, pos) } if tok == INF { return 0, nil } d, err := ParseDuration(lit) if err != nil { return 0, &ParseError{Message: err.Error(), Pos: pos} } return d, nil } // ParseIdent parses an identifier. func (p *Parser) ParseIdent() (string, error) { tok, pos, lit := p.ScanIgnoreWhitespace() if tok != IDENT { return "", newParseError(tokstr(tok, lit), []string{"identifier"}, pos) } return lit, nil } // ParseIdentList parses a comma delimited list of identifiers. func (p *Parser) ParseIdentList() ([]string, error) { // Parse first (required) identifier. ident, err := p.ParseIdent() if err != nil { return nil, err } idents := []string{ident} // Parse remaining (optional) identifiers. for { if tok, _, _ := p.ScanIgnoreWhitespace(); tok != COMMA { p.Unscan() return idents, nil } if ident, err = p.ParseIdent(); err != nil { return nil, err } idents = append(idents, ident) } } // parseSegmentedIdents parses a segmented identifiers. // e.g., "db"."rp".measurement or "db"..measurement func (p *Parser) parseSegmentedIdents() ([]string, error) { ident, err := p.ParseIdent() if err != nil { return nil, err } idents := []string{ident} // Parse remaining (optional) identifiers. for { if tok, _, _ := p.Scan(); tok != DOT { // No more segments so we're done. 
p.Unscan() break } if ch := p.peekRune(); ch == '/' { // Next segment is a regex so we're done. break } else if ch == ':' { // Next segment is context-specific so let caller handle it. break } else if ch == '.' { // Add an empty identifier. idents = append(idents, "") continue } // Parse the next identifier. if ident, err = p.ParseIdent(); err != nil { return nil, err } idents = append(idents, ident) } if len(idents) > 3 { msg := fmt.Sprintf("too many segments in %s", QuoteIdent(idents...)) return nil, &ParseError{Message: msg} } return idents, nil } // parseString parses a string. func (p *Parser) parseString() (string, error) { tok, pos, lit := p.ScanIgnoreWhitespace() if tok != STRING { return "", newParseError(tokstr(tok, lit), []string{"string"}, pos) } return lit, nil } // parseStringList parses a list of strings separated by commas. func (p *Parser) parseStringList() ([]string, error) { // Parse first (required) string. str, err := p.parseString() if err != nil { return nil, err } strs := []string{str} // Parse remaining (optional) strings. for { if tok, _, _ := p.ScanIgnoreWhitespace(); tok != COMMA { p.Unscan() return strs, nil } if str, err = p.parseString(); err != nil { return nil, err } strs = append(strs, str) } } // parseRevokeStatement parses a string and returns a revoke statement. // This function assumes the REVOKE token has already been consumed. func (p *Parser) parseRevokeStatement() (Statement, error) { // Parse the privilege to be revoked. priv, err := p.parsePrivilege() if err != nil { return nil, err } // Check for ON or FROM clauses. tok, pos, lit := p.ScanIgnoreWhitespace() if tok == ON { stmt, err := p.parseRevokeOnStatement() if err != nil { return nil, err } stmt.Privilege = priv return stmt, nil } else if tok == FROM { // Admin privilege is only revoked on ALL PRIVILEGES. 
if priv != AllPrivileges { return nil, newParseError(tokstr(tok, lit), []string{"ON"}, pos) } return p.parseRevokeAdminStatement() } // Only ON or FROM clauses are allowed after privilege. if priv == AllPrivileges { return nil, newParseError(tokstr(tok, lit), []string{"ON", "FROM"}, pos) } return nil, newParseError(tokstr(tok, lit), []string{"ON"}, pos) } // parseRevokeOnStatement parses a string and returns a revoke statement. // This function assumes the [PRIVILEGE] ON tokens have already been consumed. func (p *Parser) parseRevokeOnStatement() (*RevokeStatement, error) { stmt := &RevokeStatement{} // Parse the name of the database. lit, err := p.ParseIdent() if err != nil { return nil, err } stmt.On = lit // Parse FROM clause. tok, pos, lit := p.ScanIgnoreWhitespace() // Check for required FROM token. if tok != FROM { return nil, newParseError(tokstr(tok, lit), []string{"FROM"}, pos) } // Parse the name of the user. lit, err = p.ParseIdent() if err != nil { return nil, err } stmt.User = lit return stmt, nil } // parseRevokeAdminStatement parses a string and returns a revoke admin statement. // This function assumes the ALL [PRVILEGES] FROM token has already been consumed. func (p *Parser) parseRevokeAdminStatement() (*RevokeAdminStatement, error) { // Admin privilege is always false when revoke admin clause is called. stmt := &RevokeAdminStatement{} // Parse the name of the user. lit, err := p.ParseIdent() if err != nil { return nil, err } stmt.User = lit return stmt, nil } // parseGrantStatement parses a string and returns a grant statement. // This function assumes the GRANT token has already been consumed. func (p *Parser) parseGrantStatement() (Statement, error) { // Parse the privilege to be granted. priv, err := p.parsePrivilege() if err != nil { return nil, err } // Check for ON or TO clauses. 
tok, pos, lit := p.ScanIgnoreWhitespace() if tok == ON { stmt, err := p.parseGrantOnStatement() if err != nil { return nil, err } stmt.Privilege = priv return stmt, nil } else if tok == TO { // Admin privilege is only granted on ALL PRIVILEGES. if priv != AllPrivileges { return nil, newParseError(tokstr(tok, lit), []string{"ON"}, pos) } return p.parseGrantAdminStatement() } // Only ON or TO clauses are allowed after privilege. if priv == AllPrivileges { return nil, newParseError(tokstr(tok, lit), []string{"ON", "TO"}, pos) } return nil, newParseError(tokstr(tok, lit), []string{"ON"}, pos) } // parseGrantOnStatement parses a string and returns a grant statement. // This function assumes the [PRIVILEGE] ON tokens have already been consumed. func (p *Parser) parseGrantOnStatement() (*GrantStatement, error) { stmt := &GrantStatement{} // Parse the name of the database. lit, err := p.ParseIdent() if err != nil { return nil, err } stmt.On = lit // Parse TO clause. tok, pos, lit := p.ScanIgnoreWhitespace() // Check for required TO token. if tok != TO { return nil, newParseError(tokstr(tok, lit), []string{"TO"}, pos) } // Parse the name of the user. lit, err = p.ParseIdent() if err != nil { return nil, err } stmt.User = lit return stmt, nil } // parseGrantAdminStatement parses a string and returns a grant admin statement. // This function assumes the ALL [PRVILEGES] TO tokens have already been consumed. func (p *Parser) parseGrantAdminStatement() (*GrantAdminStatement, error) { // Admin privilege is always true when grant admin clause is called. stmt := &GrantAdminStatement{} // Parse the name of the user. lit, err := p.ParseIdent() if err != nil { return nil, err } stmt.User = lit return stmt, nil } // parsePrivilege parses a string and returns a Privilege. 
func (p *Parser) parsePrivilege() (Privilege, error) { tok, pos, lit := p.ScanIgnoreWhitespace() switch tok { case READ: return ReadPrivilege, nil case WRITE: return WritePrivilege, nil case ALL: // Consume optional PRIVILEGES token tok, pos, lit = p.ScanIgnoreWhitespace() if tok != PRIVILEGES { p.Unscan() } return AllPrivileges, nil } return 0, newParseError(tokstr(tok, lit), []string{"READ", "WRITE", "ALL [PRIVILEGES]"}, pos) } // parseSelectStatement parses a select string and returns a Statement AST object. // This function assumes the SELECT token has already been consumed. func (p *Parser) parseSelectStatement(tr targetRequirement) (*SelectStatement, error) { stmt := &SelectStatement{} var err error // Parse fields: "FIELD+". if stmt.Fields, err = p.parseFields(); err != nil { return nil, err } // Parse target: "INTO" if stmt.Target, err = p.parseTarget(tr); err != nil { return nil, err } // Parse source: "FROM". if tok, pos, lit := p.ScanIgnoreWhitespace(); tok != FROM { return nil, newParseError(tokstr(tok, lit), []string{"FROM"}, pos) } if stmt.Sources, err = p.parseSources(true); err != nil { return nil, err } // Parse condition: "WHERE EXPR". if stmt.Condition, err = p.parseCondition(); err != nil { return nil, err } // Parse dimensions: "GROUP BY DIMENSION+". if stmt.Dimensions, err = p.parseDimensions(); err != nil { return nil, err } // Parse fill options: "fill(